Commit 15d39883 authored by NeilBrown, committed by Chuck Lever

SUNRPC: change the back-channel queue to lwq

This removes the need to store and update back-links in the list.
It also removes the need for the _bh version of spin_lock().
Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 580a2575
...@@ -90,10 +90,9 @@ struct svc_serv { ...@@ -90,10 +90,9 @@ struct svc_serv {
int (*sv_threadfn)(void *data); int (*sv_threadfn)(void *data);
#if defined(CONFIG_SUNRPC_BACKCHANNEL) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head sv_cb_list; /* queue for callback requests struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same * that arrive over the same
* connection */ * connection */
spinlock_t sv_cb_lock; /* protects the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */ bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */
}; };
......
...@@ -57,6 +57,7 @@ struct xprt_class; ...@@ -57,6 +57,7 @@ struct xprt_class;
struct seq_file; struct seq_file;
struct svc_serv; struct svc_serv;
struct net; struct net;
#include <linux/lwq.h>
/* /*
* This describes a complete RPC request * This describes a complete RPC request
...@@ -121,7 +122,7 @@ struct rpc_rqst { ...@@ -121,7 +122,7 @@ struct rpc_rqst {
int rq_ntrans; int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head rq_bc_list; /* Callback service list */ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANEL */ #endif /* CONFIG_SUNRPC_BACKCHANEL */
......
...@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt) ...@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
return NULL; return NULL;
req->rq_xprt = xprt; req->rq_xprt = xprt;
INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */ /* Preallocate one XDR receive buffer */
if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) { if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
...@@ -367,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) ...@@ -367,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
dprintk("RPC: add callback request to list\n"); dprintk("RPC: add callback request to list\n");
xprt_get(xprt); xprt_get(xprt);
spin_lock(&bc_serv->sv_cb_lock); lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
spin_unlock(&bc_serv->sv_cb_lock);
svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
} }
...@@ -438,8 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind); ...@@ -438,8 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
static void static void
__svc_init_bc(struct svc_serv *serv) __svc_init_bc(struct svc_serv *serv)
{ {
INIT_LIST_HEAD(&serv->sv_cb_list); lwq_init(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
} }
#else #else
static void static void
......
...@@ -705,7 +705,7 @@ svc_thread_should_sleep(struct svc_rqst *rqstp) ...@@ -705,7 +705,7 @@ svc_thread_should_sleep(struct svc_rqst *rqstp)
#if defined(CONFIG_SUNRPC_BACKCHANNEL) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
if (svc_is_backchannel(rqstp)) { if (svc_is_backchannel(rqstp)) {
if (!list_empty(&rqstp->rq_server->sv_cb_list)) if (!lwq_empty(&rqstp->rq_server->sv_cb_list))
return false; return false;
} }
#endif #endif
...@@ -878,18 +878,12 @@ void svc_recv(struct svc_rqst *rqstp) ...@@ -878,18 +878,12 @@ void svc_recv(struct svc_rqst *rqstp)
struct svc_serv *serv = rqstp->rq_server; struct svc_serv *serv = rqstp->rq_server;
struct rpc_rqst *req; struct rpc_rqst *req;
spin_lock_bh(&serv->sv_cb_lock); req = lwq_dequeue(&serv->sv_cb_list,
req = list_first_entry_or_null(&serv->sv_cb_list,
struct rpc_rqst, rq_bc_list); struct rpc_rqst, rq_bc_list);
if (req) { if (req) {
list_del(&req->rq_bc_list);
spin_unlock_bh(&serv->sv_cb_lock);
svc_thread_wake_next(rqstp); svc_thread_wake_next(rqstp);
svc_process_bc(req, rqstp); svc_process_bc(req, rqstp);
return;
} }
spin_unlock_bh(&serv->sv_cb_lock);
} }
#endif #endif
} }
......
...@@ -263,9 +263,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, ...@@ -263,9 +263,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Queue rqst for ULP's callback service */ /* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv; bc_serv = xprt->bc_serv;
xprt_get(xprt); xprt_get(xprt);
spin_lock(&bc_serv->sv_cb_lock); lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
spin_unlock(&bc_serv->sv_cb_lock);
svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment