Commit 063ab935 authored by NeilBrown, committed by Chuck Lever

SUNRPC: integrate back-channel processing with svc_recv()

Using svc_recv() for (NFSv4.1) back-channel handling means we have just
one mechanism for waking threads.

Also change kthread_freezable_should_stop() in nfs4_callback_svc() to
kthread_should_stop() as used elsewhere.
kthread_freezable_should_stop() effectively adds a try_to_freeze() call,
and svc_recv() already contains that at an appropriate place.
Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 6ed8cdf9
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -78,7 +78,7 @@ nfs4_callback_svc(void *vrqstp)
 
 	set_freezable();
 
-	while (!kthread_freezable_should_stop(NULL))
+	while (!kthread_should_stop())
 		svc_recv(rqstp);
 
 	svc_exit_thread(rqstp);
@@ -86,41 +86,6 @@ nfs4_callback_svc(void *vrqstp)
 	return 0;
 }
 
 #if defined(CONFIG_NFS_V4_1)
-/*
- * The callback service for NFSv4.1 callbacks
- */
-static int
-nfs41_callback_svc(void *vrqstp)
-{
-	struct svc_rqst *rqstp = vrqstp;
-	struct svc_serv *serv = rqstp->rq_server;
-	struct rpc_rqst *req;
-	DEFINE_WAIT(wq);
-
-	set_freezable();
-
-	while (!kthread_freezable_should_stop(NULL)) {
-		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
-		spin_lock_bh(&serv->sv_cb_lock);
-		if (!list_empty(&serv->sv_cb_list)) {
-			req = list_first_entry(&serv->sv_cb_list,
-					struct rpc_rqst, rq_bc_list);
-			list_del(&req->rq_bc_list);
-			spin_unlock_bh(&serv->sv_cb_lock);
-			finish_wait(&serv->sv_cb_waitq, &wq);
-			svc_process_bc(req, rqstp);
-		} else {
-			spin_unlock_bh(&serv->sv_cb_lock);
-			if (!kthread_should_stop())
-				schedule();
-			finish_wait(&serv->sv_cb_waitq, &wq);
-		}
-	}
-
-	svc_exit_thread(rqstp);
-	return 0;
-}
-
 static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
 		struct svc_serv *serv)
 {
@@ -233,10 +198,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
 			cb_info->users);
 
 	threadfn = nfs4_callback_svc;
-#if defined(CONFIG_NFS_V4_1)
-	if (minorversion)
-		threadfn = nfs41_callback_svc;
-#else
+#if !defined(CONFIG_NFS_V4_1)
 	if (minorversion)
 		return ERR_PTR(-ENOTSUPP);
 #endif
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -92,8 +92,6 @@ struct svc_serv {
 						 * that arrive over the same
 						 * connection */
 	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
-	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
-						 * entries in the svc_cb_list */
 	bool			sv_bc_enabled;	/* service uses backchannel */
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 };
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -349,10 +349,8 @@ struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
 }
 
 /*
- * Add callback request to callback list. The callback
- * service sleeps on the sv_cb_waitq waiting for new
- * requests. Wake it up after adding enqueing the
- * request.
+ * Add callback request to callback list. Wake a thread
+ * on the first pool (usually the only pool) to handle it.
  */
 void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
@@ -371,6 +369,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 	xprt_get(xprt);
 	spin_lock(&bc_serv->sv_cb_lock);
 	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
-	wake_up(&bc_serv->sv_cb_waitq);
 	spin_unlock(&bc_serv->sv_cb_lock);
+	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
 }
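
The enqueue/wake pairing now mirrors the ordinary transport path: the producer queues the request under sv_cb_lock and wakes an idle thread in the first pool, and the woken thread finds the request in the svc_recv() hunk further down. Condensed from the hunks in this commit:

```c
/* Producer (xprt_complete_bc_request(), and similarly
 * rpcrdma_bc_receive_call() below): queue the request, then wake
 * an idle thread in the first pool.
 */
spin_lock(&bc_serv->sv_cb_lock);
list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
spin_unlock(&bc_serv->sv_cb_lock);
svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);

/* Consumer (svc_recv(), under CONFIG_SUNRPC_BACKCHANNEL): pop one
 * request off the list and process it.
 */
spin_lock_bh(&serv->sv_cb_lock);
req = list_first_entry_or_null(&serv->sv_cb_list,
                               struct rpc_rqst, rq_bc_list);
if (req)
        list_del(&req->rq_bc_list);
spin_unlock_bh(&serv->sv_cb_lock);
if (req)
        svc_process_bc(req, rqstp);
```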
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -440,7 +440,6 @@ __svc_init_bc(struct svc_serv *serv)
 {
 	INIT_LIST_HEAD(&serv->sv_cb_list);
 	spin_lock_init(&serv->sv_cb_lock);
-	init_waitqueue_head(&serv->sv_cb_waitq);
 }
 #else
 static void
@@ -718,6 +717,7 @@ void svc_pool_wake_idle_thread(struct svc_pool *pool)
 	set_bit(SP_CONGESTED, &pool->sp_flags);
 }
+EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
 
 static struct svc_pool *
 svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -17,6 +17,7 @@
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/bc_xprt.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <trace/events/sunrpc.h>
@@ -719,6 +720,13 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 	if (freezing(current))
 		return false;
 
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+	if (svc_is_backchannel(rqstp)) {
+		if (!list_empty(&rqstp->rq_server->sv_cb_list))
+			return false;
+	}
+#endif
+
 	return true;
 }
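
The rqst_should_sleep() check above keeps a back-channel thread from parking while requests sit on sv_cb_list. A sketch of the lost-wakeup interleaving this plausibly guards against (an illustration, not text from the patch):

```c
/*
 * callback thread                    transport
 * ---------------                    ---------
 * svc_recv(): sv_cb_list empty
 *                                    list_add(req, &serv->sv_cb_list);
 *                                    svc_pool_wake_idle_thread();
 *                                      (thread not yet idle: no-op)
 * rqst_should_sleep():
 *   sv_cb_list now non-empty
 *   -> return false; do not sleep
 * loops and handles req via svc_process_bc()
 */
```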
@@ -868,6 +876,25 @@ void svc_recv(struct svc_rqst *rqstp)
 		trace_svc_xprt_dequeue(rqstp);
 		svc_handle_xprt(rqstp, xprt);
 	}
+
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+	if (svc_is_backchannel(rqstp)) {
+		struct svc_serv *serv = rqstp->rq_server;
+		struct rpc_rqst *req;
+
+		spin_lock_bh(&serv->sv_cb_lock);
+		req = list_first_entry_or_null(&serv->sv_cb_list,
+					       struct rpc_rqst, rq_bc_list);
+		if (req) {
+			list_del(&req->rq_bc_list);
+			spin_unlock_bh(&serv->sv_cb_lock);
+			svc_process_bc(req, rqstp);
+			return;
+		}
+		spin_unlock_bh(&serv->sv_cb_lock);
+	}
+#endif
 }
 EXPORT_SYMBOL_GPL(svc_recv);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -267,7 +267,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
 	spin_unlock(&bc_serv->sv_cb_lock);
 
-	wake_up(&bc_serv->sv_cb_waitq);
+	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
 
 	r_xprt->rx_stats.bcall_count++;
 	return;