Commit 9867d76c authored by Jeff Layton's avatar Jeff Layton Committed by J. Bruce Fields

knfsd: convert knfsd to kthread API

This patch is rather large, but I couldn't figure out a way to break it
up that would remain bisectable. It does several things:

- change svc_thread_fn typedef to better match what kthread_create expects
- change svc_pool_map_set_cpumask to be more kthread friendly. Make it
  take a task arg and get rid of the "oldmask"
- have svc_set_num_threads call kthread_create directly
- eliminate __svc_create_thread
Signed-off-by: default avatarJeff Layton <jlayton@redhat.com>
Signed-off-by: default avatarJ. Bruce Fields <bfields@citi.umich.edu>
parent e096bbc6
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/fs_struct.h> #include <linux/fs_struct.h>
#include <linux/kthread.h>
#include <linux/sunrpc/types.h> #include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h> #include <linux/sunrpc/stats.h>
...@@ -46,7 +47,7 @@ ...@@ -46,7 +47,7 @@
#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT)) #define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
extern struct svc_program nfsd_program; extern struct svc_program nfsd_program;
static void nfsd(struct svc_rqst *rqstp); static int nfsd(void *vrqstp);
struct timeval nfssvc_boot; struct timeval nfssvc_boot;
static atomic_t nfsd_busy; static atomic_t nfsd_busy;
static unsigned long nfsd_last_call; static unsigned long nfsd_last_call;
...@@ -407,18 +408,19 @@ update_thread_usage(int busy_threads) ...@@ -407,18 +408,19 @@ update_thread_usage(int busy_threads)
/* /*
* This is the NFS server kernel thread * This is the NFS server kernel thread
*/ */
static void static int
nfsd(struct svc_rqst *rqstp) nfsd(void *vrqstp)
{ {
struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
struct fs_struct *fsp; struct fs_struct *fsp;
int err;
sigset_t shutdown_mask, allowed_mask; sigset_t shutdown_mask, allowed_mask;
int err, preverr = 0;
unsigned int signo;
/* Lock module and set up kernel thread */ /* Lock module and set up kernel thread */
mutex_lock(&nfsd_mutex); mutex_lock(&nfsd_mutex);
daemonize("nfsd");
/* After daemonize() this kernel thread shares current->fs /* At this point, the thread shares current->fs
* with the init process. We need to create files with a * with the init process. We need to create files with a
* umask of 0 instead of init's umask. */ * umask of 0 instead of init's umask. */
fsp = copy_fs_struct(current->fs); fsp = copy_fs_struct(current->fs);
...@@ -433,14 +435,18 @@ nfsd(struct svc_rqst *rqstp) ...@@ -433,14 +435,18 @@ nfsd(struct svc_rqst *rqstp)
siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS); siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
siginitsetinv(&allowed_mask, ALLOWED_SIGS); siginitsetinv(&allowed_mask, ALLOWED_SIGS);
/*
* thread is spawned with all signals set to SIG_IGN, re-enable
* the ones that matter
*/
for (signo = 1; signo <= _NSIG; signo++) {
if (!sigismember(&shutdown_mask, signo))
allow_signal(signo);
}
nfsdstats.th_cnt++; nfsdstats.th_cnt++;
rqstp->rq_task = current;
mutex_unlock(&nfsd_mutex); mutex_unlock(&nfsd_mutex);
/* /*
* We want less throttling in balance_dirty_pages() so that nfs to * We want less throttling in balance_dirty_pages() so that nfs to
* localhost doesn't cause nfsd to lock up due to all the client's * localhost doesn't cause nfsd to lock up due to all the client's
...@@ -462,15 +468,25 @@ nfsd(struct svc_rqst *rqstp) ...@@ -462,15 +468,25 @@ nfsd(struct svc_rqst *rqstp)
*/ */
while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN) while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
; ;
if (err < 0) if (err == -EINTR)
break; break;
else if (err < 0) {
if (err != preverr) {
printk(KERN_WARNING "%s: unexpected error "
"from svc_recv (%d)\n", __func__, -err);
preverr = err;
}
schedule_timeout_uninterruptible(HZ);
continue;
}
update_thread_usage(atomic_read(&nfsd_busy)); update_thread_usage(atomic_read(&nfsd_busy));
atomic_inc(&nfsd_busy); atomic_inc(&nfsd_busy);
/* Lock the export hash tables for reading. */ /* Lock the export hash tables for reading. */
exp_readlock(); exp_readlock();
/* Process request with signals blocked. */ /* Process request with signals blocked. */
sigprocmask(SIG_SETMASK, &allowed_mask, NULL); sigprocmask(SIG_SETMASK, &allowed_mask, NULL);
svc_process(rqstp); svc_process(rqstp);
...@@ -481,14 +497,10 @@ nfsd(struct svc_rqst *rqstp) ...@@ -481,14 +497,10 @@ nfsd(struct svc_rqst *rqstp)
atomic_dec(&nfsd_busy); atomic_dec(&nfsd_busy);
} }
if (err != -EINTR)
printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
/* Clear signals before calling svc_exit_thread() */ /* Clear signals before calling svc_exit_thread() */
flush_signals(current); flush_signals(current);
mutex_lock(&nfsd_mutex); mutex_lock(&nfsd_mutex);
nfsdstats.th_cnt --; nfsdstats.th_cnt --;
out: out:
...@@ -498,6 +510,7 @@ nfsd(struct svc_rqst *rqstp) ...@@ -498,6 +510,7 @@ nfsd(struct svc_rqst *rqstp)
/* Release module */ /* Release module */
mutex_unlock(&nfsd_mutex); mutex_unlock(&nfsd_mutex);
module_put_and_exit(0); module_put_and_exit(0);
return 0;
} }
static __be32 map_new_errors(u32 vers, __be32 nfserr) static __be32 map_new_errors(u32 vers, __be32 nfserr)
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
/* /*
* This is the RPC server thread function prototype * This is the RPC server thread function prototype
*/ */
typedef void (*svc_thread_fn)(struct svc_rqst *); typedef int (*svc_thread_fn)(void *);
/* /*
* *
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sunrpc/types.h> #include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h> #include <linux/sunrpc/xdr.h>
...@@ -291,15 +292,14 @@ svc_pool_map_put(void) ...@@ -291,15 +292,14 @@ svc_pool_map_put(void)
/* /*
* Set the current thread's cpus_allowed mask so that it * Set the given thread's cpus_allowed mask so that it
* will only run on cpus in the given pool. * will only run on cpus in the given pool.
*
* Returns 1 and fills in oldmask iff a cpumask was applied.
*/ */
static inline int static inline void
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{ {
struct svc_pool_map *m = &svc_pool_map; struct svc_pool_map *m = &svc_pool_map;
unsigned int node = m->pool_to[pidx];
/* /*
* The caller checks for sv_nrpools > 1, which * The caller checks for sv_nrpools > 1, which
...@@ -307,26 +307,17 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) ...@@ -307,26 +307,17 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
*/ */
BUG_ON(m->count == 0); BUG_ON(m->count == 0);
switch (m->mode) switch (m->mode) {
{
default:
return 0;
case SVC_POOL_PERCPU: case SVC_POOL_PERCPU:
{ {
unsigned int cpu = m->pool_to[pidx]; set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
break;
*oldmask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
return 1;
} }
case SVC_POOL_PERNODE: case SVC_POOL_PERNODE:
{ {
unsigned int node = m->pool_to[pidx];
node_to_cpumask_ptr(nodecpumask, node); node_to_cpumask_ptr(nodecpumask, node);
set_cpus_allowed_ptr(task, nodecpumask);
*oldmask = current->cpus_allowed; break;
set_cpus_allowed_ptr(current, nodecpumask);
return 1;
} }
} }
} }
...@@ -578,47 +569,6 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool) ...@@ -578,47 +569,6 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
} }
EXPORT_SYMBOL(svc_prepare_thread); EXPORT_SYMBOL(svc_prepare_thread);
/*
* Create a thread in the given pool. Caller must hold BKL or another lock to
* serialize access to the svc_serv struct. On a NUMA or SMP machine, with a
* multi-pool serv, the thread will be restricted to run on the cpus belonging
* to the pool.
*/
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
struct svc_pool *pool)
{
struct svc_rqst *rqstp;
int error = -ENOMEM;
int have_oldmask = 0;
cpumask_t uninitialized_var(oldmask);
rqstp = svc_prepare_thread(serv, pool);
if (IS_ERR(rqstp)) {
error = PTR_ERR(rqstp);
goto out;
}
if (serv->sv_nrpools > 1)
have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
error = kernel_thread((int (*)(void *)) func, rqstp, 0);
if (have_oldmask)
set_cpus_allowed(current, oldmask);
if (error < 0)
goto out_thread;
svc_sock_update_bufs(serv);
error = 0;
out:
return error;
out_thread:
svc_exit_thread(rqstp);
goto out;
}
/* /*
* Choose a pool in which to create a new thread, for svc_set_num_threads * Choose a pool in which to create a new thread, for svc_set_num_threads
*/ */
...@@ -688,7 +638,9 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) ...@@ -688,7 +638,9 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
int int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{ {
struct task_struct *victim; struct svc_rqst *rqstp;
struct task_struct *task;
struct svc_pool *chosen_pool;
int error = 0; int error = 0;
unsigned int state = serv->sv_nrthreads-1; unsigned int state = serv->sv_nrthreads-1;
...@@ -704,18 +656,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) ...@@ -704,18 +656,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
/* create new threads */ /* create new threads */
while (nrservs > 0) { while (nrservs > 0) {
nrservs--; nrservs--;
chosen_pool = choose_pool(serv, pool, &state);
rqstp = svc_prepare_thread(serv, chosen_pool);
if (IS_ERR(rqstp)) {
error = PTR_ERR(rqstp);
break;
}
__module_get(serv->sv_module); __module_get(serv->sv_module);
error = __svc_create_thread(serv->sv_function, serv, task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
choose_pool(serv, pool, &state)); if (IS_ERR(task)) {
if (error < 0) { error = PTR_ERR(task);
module_put(serv->sv_module); module_put(serv->sv_module);
svc_exit_thread(rqstp);
break; break;
} }
rqstp->rq_task = task;
if (serv->sv_nrpools > 1)
svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
svc_sock_update_bufs(serv);
wake_up_process(task);
} }
/* destroy old threads */ /* destroy old threads */
while (nrservs < 0 && while (nrservs < 0 &&
(victim = choose_victim(serv, pool, &state)) != NULL) { (task = choose_victim(serv, pool, &state)) != NULL) {
send_sig(serv->sv_kill_signal, victim, 1); send_sig(serv->sv_kill_signal, task, 1);
nrservs++; nrservs++;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment