Commit c2922c02 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Properly handle exhaustion of the rb_mws list

If the rb_mws list is exhausted, clean up and return NULL so that
call_allocate() will delay and try again.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 3111d72c
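
For readers outside the kernel tree, here is a minimal user-space sketch of the behaviour the commit message describes: the caller treats a NULL return as transient exhaustion, delays, and tries again. Everything below (fake_buffer_get(), get_buffers_with_retry(), RETRY_DELAY_US) is a hypothetical stand-in, not the actual call_allocate() or transport allocation code.

    /* Illustrative sketch only, not kernel code. */
    #include <stdio.h>
    #include <unistd.h>

    #define RETRY_DELAY_US	(10 * 1000)	/* back-off between attempts */

    /* Stub that fails twice before succeeding, standing in for a
     * buffer_get-style routine whose free list is temporarily empty. */
    static void *fake_buffer_get(void)
    {
    	static int calls;

    	return (++calls > 2) ? (void *)&calls : NULL;
    }

    static void *get_buffers_with_retry(int max_tries)
    {
    	void *req;

    	while (max_tries-- > 0) {
    		req = fake_buffer_get();
    		if (req)
    			return req;		/* got a request */
    		usleep(RETRY_DELAY_US);		/* exhausted: delay, then retry */
    	}
    	return NULL;
    }

    int main(void)
    {
    	printf("%s\n", get_buffers_with_retry(5) ? "got buffers" : "gave up");
    	return 0;
    }
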
@@ -1256,6 +1256,67 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 	kfree(buf->rb_pool);
 }
 
+/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
+ * some req segments uninitialized.
+ */
+static void
+rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
+{
+	if (*mw) {
+		list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
+		*mw = NULL;
+	}
+}
+
+/* Cycle mw's back in reverse order, and "spin" them.
+ * This delays and scrambles reuse as much as possible.
+ */
+static void
+rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_segments;
+	struct rpcrdma_mr_seg *seg1 = seg;
+	int i;
+
+	for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
+		rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf);
+	rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf);
+}
+
+static void
+rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	buf->rb_send_bufs[--buf->rb_send_index] = req;
+	req->rl_niovs = 0;
+	if (req->rl_reply) {
+		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
+		req->rl_reply->rr_func = NULL;
+		req->rl_reply = NULL;
+	}
+}
+
+static struct rpcrdma_req *
+rpcrdma_buffer_get_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_mw *r;
+	int i;
+
+	i = RPCRDMA_MAX_SEGS - 1;
+	while (!list_empty(&buf->rb_mws)) {
+		r = list_entry(buf->rb_mws.next,
+			       struct rpcrdma_mw, mw_list);
+		list_del(&r->mw_list);
+		req->rl_segments[i].mr_chunk.rl_mw = r;
+		if (unlikely(i-- == 0))
+			return req;	/* Success */
+	}
+
+	/* Not enough entries on rb_mws for this req */
+	rpcrdma_buffer_put_sendbuf(req, buf);
+	rpcrdma_buffer_put_mrs(req, buf);
+	return NULL;
+}
+
 /*
  * Get a set of request/reply buffers.
  *
@@ -1268,10 +1329,9 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
+	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
 	struct rpcrdma_req *req;
 	unsigned long flags;
-	int i;
-	struct rpcrdma_mw *r;
 
 	spin_lock_irqsave(&buffers->rb_lock, flags);
 	if (buffers->rb_send_index == buffers->rb_max_requests) {
@@ -1291,14 +1351,13 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
 	}
 	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
-	if (!list_empty(&buffers->rb_mws)) {
-		i = RPCRDMA_MAX_SEGS - 1;
-		do {
-			r = list_entry(buffers->rb_mws.next,
-					struct rpcrdma_mw, mw_list);
-			list_del(&r->mw_list);
-			req->rl_segments[i].mr_chunk.rl_mw = r;
-		} while (--i >= 0);
+	switch (ia->ri_memreg_strategy) {
+	case RPCRDMA_FRMR:
+	case RPCRDMA_MTHCAFMR:
+		req = rpcrdma_buffer_get_mrs(req, buffers);
+		break;
+	default:
+		break;
 	}
 	spin_unlock_irqrestore(&buffers->rb_lock, flags);
 	return req;
@@ -1313,34 +1372,14 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
-	int i;
 	unsigned long flags;
 
 	spin_lock_irqsave(&buffers->rb_lock, flags);
-	buffers->rb_send_bufs[--buffers->rb_send_index] = req;
-	req->rl_niovs = 0;
-	if (req->rl_reply) {
-		buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
-		req->rl_reply->rr_func = NULL;
-		req->rl_reply = NULL;
-	}
+	rpcrdma_buffer_put_sendbuf(req, buffers);
 	switch (ia->ri_memreg_strategy) {
 	case RPCRDMA_FRMR:
 	case RPCRDMA_MTHCAFMR:
-		/*
-		 * Cycle mw's back in reverse order, and "spin" them.
-		 * This delays and scrambles reuse as much as possible.
-		 */
-		i = 1;
-		do {
-			struct rpcrdma_mw **mw;
-			mw = &req->rl_segments[i].mr_chunk.rl_mw;
-			list_add_tail(&(*mw)->mw_list, &buffers->rb_mws);
-			*mw = NULL;
-		} while (++i < RPCRDMA_MAX_SEGS);
-		list_add_tail(&req->rl_segments[0].mr_chunk.rl_mw->mw_list,
-					&buffers->rb_mws);
-		req->rl_segments[0].mr_chunk.rl_mw = NULL;
+		rpcrdma_buffer_put_mrs(req, buffers);
 		break;
 	default:
 		break;
...
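
Below is a self-contained sketch (illustrative only; the list type and all names such as free_list, pop(), push(), acquire_segments(), and SEG_COUNT are hypothetical, not the kernel's struct list_head, rb_mws, or RPCRDMA_MAX_SEGS) of the all-or-nothing pattern rpcrdma_buffer_get_mrs() implements above: either every segment slot gets an entry from the free list, or everything taken so far is put back and the caller gets a failure it can retry.

    #include <stdio.h>

    #define SEG_COUNT 4		/* stands in for the per-request segment count */

    struct mw {
    	struct mw *next;
    };

    static struct mw *free_list;	/* stands in for the free MW list */

    static struct mw *pop(void)
    {
    	struct mw *r = free_list;

    	if (r)
    		free_list = r->next;
    	return r;
    }

    static void push(struct mw *r)
    {
    	r->next = free_list;
    	free_list = r;
    }

    /* Fill every slot from the free list, or put back what was taken. */
    static int acquire_segments(struct mw *segs[SEG_COUNT])
    {
    	int i;

    	for (i = SEG_COUNT - 1; i >= 0; i--) {
    		segs[i] = pop();
    		if (!segs[i])
    			goto out_undo;
    	}
    	return 0;			/* success: every slot populated */

    out_undo:
    	/* Not enough entries: return the ones already taken so the
    	 * caller can back off and retry later. */
    	for (i++; i < SEG_COUNT; i++) {
    		push(segs[i]);
    		segs[i] = NULL;
    	}
    	return -1;
    }

    int main(void)
    {
    	struct mw pool[SEG_COUNT - 1];	/* deliberately one short */
    	struct mw *segs[SEG_COUNT] = { NULL };
    	int i;

    	for (i = 0; i < SEG_COUNT - 1; i++)
    		push(&pool[i]);

    	if (acquire_segments(segs) < 0)
    		printf("free list exhausted: cleaned up, caller can retry\n");
    	else
    		printf("all %d segments acquired\n", SEG_COUNT);
    	return 0;
    }

The kernel version in the diff does the same thing under buffers->rb_lock and, on failure, also returns the send/receive buffers via rpcrdma_buffer_put_sendbuf() before handing NULL back to the caller.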