Commit fa341560 authored by NeilBrown, committed by Chuck Lever

SUNRPC: change how svc threads are asked to exit.

svc threads are currently stopped using kthread_stop().  This requires
identifying a specific thread.  However we don't care which thread
stops, just as long as one does.

So instead, set a flag in the svc_pool to say that a thread needs to
die, and have each thread check this flag instead of calling
kthread_should_stop().  The first thread to find and clear this flag
then moves towards exiting.

This removes an explicit dependency on sp_all_threads which will make a
future patch simpler.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent f4578ba1
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/inetdevice.h> #include <linux/inetdevice.h>
...@@ -135,11 +134,11 @@ lockd(void *vrqstp) ...@@ -135,11 +134,11 @@ lockd(void *vrqstp)
* The main request loop. We don't terminate until the last * The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away. * NFS mount or NFS daemon has gone away.
*/ */
while (!kthread_should_stop()) { while (!svc_thread_should_stop(rqstp)) {
/* update sv_maxconn if it has changed */ /* update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nlm_max_connections; rqstp->rq_server->sv_maxconn = nlm_max_connections;
nlmsvc_retry_blocked(); nlmsvc_retry_blocked(rqstp);
svc_recv(rqstp); svc_recv(rqstp);
} }
if (nlmsvc_ops) if (nlmsvc_ops)
......
...@@ -30,7 +30,6 @@ ...@@ -30,7 +30,6 @@
#include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h> #include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h> #include <linux/lockd/lockd.h>
#include <linux/kthread.h>
#include <linux/exportfs.h> #include <linux/exportfs.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK #define NLMDBG_FACILITY NLMDBG_SVCLOCK
...@@ -1032,13 +1031,13 @@ retry_deferred_block(struct nlm_block *block) ...@@ -1032,13 +1031,13 @@ retry_deferred_block(struct nlm_block *block)
* be retransmitted. * be retransmitted.
*/ */
void void
nlmsvc_retry_blocked(void) nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{ {
unsigned long timeout = MAX_SCHEDULE_TIMEOUT; unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block; struct nlm_block *block;
spin_lock(&nlm_blocked_lock); spin_lock(&nlm_blocked_lock);
while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list); block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER) if (block->b_when == NLM_NEVER)
......
...@@ -78,7 +78,7 @@ nfs4_callback_svc(void *vrqstp) ...@@ -78,7 +78,7 @@ nfs4_callback_svc(void *vrqstp)
set_freezable(); set_freezable();
while (!kthread_should_stop()) while (!svc_thread_should_stop(rqstp))
svc_recv(rqstp); svc_recv(rqstp);
svc_exit_thread(rqstp); svc_exit_thread(rqstp);
......
...@@ -1329,7 +1329,8 @@ extern void nfs_sb_deactive(struct super_block *sb); ...@@ -1329,7 +1329,8 @@ extern void nfs_sb_deactive(struct super_block *sb);
* setup a work entry in the ssc delayed unmount list. * setup a work entry in the ssc delayed unmount list.
*/ */
static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr, static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
struct nfsd4_ssc_umount_item **nsui) struct nfsd4_ssc_umount_item **nsui,
struct svc_rqst *rqstp)
{ {
struct nfsd4_ssc_umount_item *ni = NULL; struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *work = NULL; struct nfsd4_ssc_umount_item *work = NULL;
...@@ -1351,7 +1352,7 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr, ...@@ -1351,7 +1352,7 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
spin_unlock(&nn->nfsd_ssc_lock); spin_unlock(&nn->nfsd_ssc_lock);
/* allow 20secs for mount/unmount for now - revisit */ /* allow 20secs for mount/unmount for now - revisit */
if (kthread_should_stop() || if (svc_thread_should_stop(rqstp) ||
(schedule_timeout(20*HZ) == 0)) { (schedule_timeout(20*HZ) == 0)) {
finish_wait(&nn->nfsd_ssc_waitq, &wait); finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work); kfree(work);
...@@ -1467,7 +1468,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp, ...@@ -1467,7 +1468,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
goto out_free_rawdata; goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep); snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui); status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui, rqstp);
if (status) if (status)
goto out_free_devname; goto out_free_devname;
if ((*nsui)->nsui_vfsmount) if ((*nsui)->nsui_vfsmount)
...@@ -1642,6 +1643,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy, ...@@ -1642,6 +1643,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
if (bytes_total == 0) if (bytes_total == 0)
bytes_total = ULLONG_MAX; bytes_total = ULLONG_MAX;
do { do {
/* Only async copies can be stopped here */
if (kthread_should_stop()) if (kthread_should_stop())
break; break;
bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos, bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
......
...@@ -957,7 +957,7 @@ nfsd(void *vrqstp) ...@@ -957,7 +957,7 @@ nfsd(void *vrqstp)
/* /*
* The main request loop * The main request loop
*/ */
while (!kthread_should_stop()) { while (!svc_thread_should_stop(rqstp)) {
/* Update sv_maxconn if it has changed */ /* Update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nn->max_connections; rqstp->rq_server->sv_maxconn = nn->max_connections;
......
...@@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, ...@@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *); struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
void nlmsvc_retry_blocked(void); void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match); nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32); void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
......
...@@ -50,6 +50,8 @@ struct svc_pool { ...@@ -50,6 +50,8 @@ struct svc_pool {
enum { enum {
SP_TASK_PENDING, /* still work to do even if no xprt is queued */ SP_TASK_PENDING, /* still work to do even if no xprt is queued */
SP_CONGESTED, /* all threads are busy, none idle */ SP_CONGESTED, /* all threads are busy, none idle */
SP_NEED_VICTIM, /* One thread needs to agree to exit */
SP_VICTIM_REMAINS, /* One thread needs to actually exit */
}; };
...@@ -259,7 +261,7 @@ enum { ...@@ -259,7 +261,7 @@ enum {
RQ_DROPME, /* drop current reply */ RQ_DROPME, /* drop current reply */
RQ_SPLICE_OK, /* turned off in gss privacy to prevent RQ_SPLICE_OK, /* turned off in gss privacy to prevent
* encrypting page cache pages */ * encrypting page cache pages */
RQ_VICTIM, /* about to be shut down */ RQ_VICTIM, /* Have agreed to shut down */
RQ_BUSY, /* request is busy */ RQ_BUSY, /* request is busy */
RQ_DATA, /* request has data */ RQ_DATA, /* request has data */
}; };
...@@ -299,6 +301,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst) ...@@ -299,6 +301,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr; return (struct sockaddr *) &rqst->rq_daddr;
} }
/**
* svc_thread_should_stop - check if this thread should stop
* @rqstp: the thread that might need to stop
*
* To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
* are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
* the victim using this function. It should then promptly call
* svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
* so the task waiting for a thread to exit can wake and continue.
*
* Return values:
* %true: caller should invoke svc_exit_thread()
* %false: caller should do nothing
*/
static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
{
if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
set_bit(RQ_VICTIM, &rqstp->rq_flags);
return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
struct svc_deferred_req { struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */ u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt; struct svc_xprt *xprt;
......
...@@ -725,19 +725,22 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) ...@@ -725,19 +725,22 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
} }
static struct task_struct * static struct svc_pool *
svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{ {
unsigned int i; unsigned int i;
struct task_struct *task = NULL;
if (pool != NULL) { if (pool != NULL) {
spin_lock_bh(&pool->sp_lock); spin_lock_bh(&pool->sp_lock);
if (pool->sp_nrthreads)
goto found_pool;
spin_unlock_bh(&pool->sp_lock);
return NULL;
} else { } else {
for (i = 0; i < serv->sv_nrpools; i++) { for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
spin_lock_bh(&pool->sp_lock); spin_lock_bh(&pool->sp_lock);
if (!list_empty(&pool->sp_all_threads)) if (pool->sp_nrthreads)
goto found_pool; goto found_pool;
spin_unlock_bh(&pool->sp_lock); spin_unlock_bh(&pool->sp_lock);
} }
...@@ -745,16 +748,10 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *stat ...@@ -745,16 +748,10 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *stat
} }
found_pool: found_pool:
if (!list_empty(&pool->sp_all_threads)) { set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
struct svc_rqst *rqstp; set_bit(SP_NEED_VICTIM, &pool->sp_flags);
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
set_bit(RQ_VICTIM, &rqstp->rq_flags);
list_del_rcu(&rqstp->rq_all);
task = rqstp->rq_task;
}
spin_unlock_bh(&pool->sp_lock); spin_unlock_bh(&pool->sp_lock);
return task; return pool;
} }
static int static int
...@@ -795,18 +792,16 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) ...@@ -795,18 +792,16 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
static int static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{ {
struct svc_rqst *rqstp;
struct task_struct *task;
unsigned int state = serv->sv_nrthreads-1; unsigned int state = serv->sv_nrthreads-1;
struct svc_pool *victim;
do { do {
task = svc_pool_victim(serv, pool, &state); victim = svc_pool_victim(serv, pool, &state);
if (task == NULL) if (!victim)
break; break;
rqstp = kthread_data(task); svc_pool_wake_idle_thread(victim);
/* Did we lose a race to svo_function threadfn? */ wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
if (kthread_stop(task) == -EINTR) TASK_IDLE);
svc_exit_thread(rqstp);
nrservs++; nrservs++;
} while (nrservs < 0); } while (nrservs < 0);
return 0; return 0;
...@@ -926,7 +921,6 @@ svc_exit_thread(struct svc_rqst *rqstp) ...@@ -926,7 +921,6 @@ svc_exit_thread(struct svc_rqst *rqstp)
spin_lock_bh(&pool->sp_lock); spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads--; pool->sp_nrthreads--;
if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
list_del_rcu(&rqstp->rq_all); list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock); spin_unlock_bh(&pool->sp_lock);
...@@ -938,6 +932,11 @@ svc_exit_thread(struct svc_rqst *rqstp) ...@@ -938,6 +932,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
svc_rqst_free(rqstp); svc_rqst_free(rqstp);
svc_put(serv); svc_put(serv);
/* That svc_put() cannot be the last, because the thread
* waiting for SP_VICTIM_REMAINS to clear must hold
* a reference. So it is still safe to access pool.
*/
clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
} }
EXPORT_SYMBOL_GPL(svc_exit_thread); EXPORT_SYMBOL_GPL(svc_exit_thread);
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <net/sock.h> #include <net/sock.h>
#include <linux/sunrpc/addr.h> #include <linux/sunrpc/addr.h>
...@@ -675,7 +674,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp) ...@@ -675,7 +674,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
continue; continue;
set_current_state(TASK_IDLE); set_current_state(TASK_IDLE);
if (kthread_should_stop()) { if (svc_thread_should_stop(rqstp)) {
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
return false; return false;
} }
...@@ -713,7 +712,7 @@ rqst_should_sleep(struct svc_rqst *rqstp) ...@@ -713,7 +712,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return false; return false;
/* are we shutting down? */ /* are we shutting down? */
if (kthread_should_stop()) if (svc_thread_should_stop(rqstp))
return false; return false;
/* are we freezing? */ /* are we freezing? */
...@@ -858,7 +857,7 @@ void svc_recv(struct svc_rqst *rqstp) ...@@ -858,7 +857,7 @@ void svc_recv(struct svc_rqst *rqstp)
clear_bit(SP_TASK_PENDING, &pool->sp_flags); clear_bit(SP_TASK_PENDING, &pool->sp_flags);
if (kthread_should_stop()) if (svc_thread_should_stop(rqstp))
return; return;
rqstp->rq_xprt = svc_xprt_dequeue(pool); rqstp->rq_xprt = svc_xprt_dequeue(pool);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment