Commit 0d1bf340 authored by Trond Myklebust's avatar Trond Myklebust

SUNRPC: Allow dynamic allocation of back channel slots

Now that the reads happen in a process context rather than a softirq,
it is safe to allocate back channel slots using a reclaiming
allocation.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 067c4696
...@@ -235,7 +235,8 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs) ...@@ -235,7 +235,8 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
list_empty(&xprt->bc_pa_list) ? "true" : "false"); list_empty(&xprt->bc_pa_list) ? "true" : "false");
} }
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
struct rpc_rqst *new)
{ {
struct rpc_rqst *req = NULL; struct rpc_rqst *req = NULL;
...@@ -243,10 +244,9 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) ...@@ -243,10 +244,9 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
if (atomic_read(&xprt->bc_free_slots) <= 0) if (atomic_read(&xprt->bc_free_slots) <= 0)
goto not_found; goto not_found;
if (list_empty(&xprt->bc_pa_list)) { if (list_empty(&xprt->bc_pa_list)) {
req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); if (!new)
if (!req)
goto not_found; goto not_found;
list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
xprt->bc_alloc_count++; xprt->bc_alloc_count++;
} }
req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
...@@ -256,8 +256,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) ...@@ -256,8 +256,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
sizeof(req->rq_private_buf)); sizeof(req->rq_private_buf));
req->rq_xid = xid; req->rq_xid = xid;
req->rq_connect_cookie = xprt->connect_cookie; req->rq_connect_cookie = xprt->connect_cookie;
not_found:
dprintk("RPC: backchannel req=%p\n", req); dprintk("RPC: backchannel req=%p\n", req);
not_found:
return req; return req;
} }
...@@ -320,18 +320,27 @@ void xprt_free_bc_rqst(struct rpc_rqst *req) ...@@ -320,18 +320,27 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
*/ */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	/* 'new' holds a speculatively allocated slot, donated to the
	 * transport's preallocation list on the retry pass if the list
	 * ran dry while the lock was dropped. */
	struct rpc_rqst *req, *new = NULL;

	do {
		spin_lock(&xprt->bc_pa_lock);
		/* First look for a preallocated request already bound to
		 * this xid on the current connection (matching
		 * connect_cookie). */
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;
			if (req->rq_xid == xid)
				goto found;
		}
		/* No match: claim a free slot for this xid. 'new' (if
		 * non-NULL) is offered so the helper can replenish an
		 * empty bc_pa_list. */
		req = xprt_get_bc_request(xprt, xid, new);
found:
		spin_unlock(&xprt->bc_pa_lock);
		if (new) {
			/* Second pass: if the donated slot was not the one
			 * handed back, it is surplus — release it. Either
			 * way, do not loop again. */
			if (req != new)
				xprt_free_bc_rqst(new);
			break;
		} else if (req)
			break;
		/* First pass found nothing: allocate a fresh slot outside
		 * the spinlock. GFP_KERNEL (a reclaiming allocation) is
		 * safe here because, per this commit, lookup now runs in
		 * process context rather than softirq. Retry once with it;
		 * give up if allocation fails (new == NULL). */
		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
	} while (new);
	return req;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment