Commit 02e7452d authored by Tom Tucker

svcrdma: Simplify RDMA_READ deferral buffer management

An NFS_WRITE requires a set of RDMA_READ requests to fetch the write
data from the client. There are two principal pieces of data that
need to be tracked: the list of pages that comprise the completed RPC
and the SGE of DMA-mapped pages that refer to this list of pages. Previously
this was managed as a linked list of contexts, with the
context containing the page list buried in that list. This patch
simplifies the processing by not keeping a linked list, but rather only
a pointer from the last submitted RDMA_READ's context to the context
that maps the set of pages that describe the RPC. This significantly
simplifies this code path. SGE contexts are cleaned up inline in the DTO
path instead of at read completion time.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent 10a38c33
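
To make the pointer change concrete, here is a small user-space sketch of the new scheme. The read_hdr field and the one-context-per-READ shape mirror the patch; everything else (the plain calloc/free allocator, the completion-callback signature, the last_ctxt int standing in for the RDMACTXT_F_LAST_CTXT bit) is an illustrative stand-in, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

struct svc_rdma_op_ctxt {
	struct svc_rdma_op_ctxt *read_hdr;	/* last READ's ctxt -> page-list ctxt */
	int last_ctxt;				/* stands in for RDMACTXT_F_LAST_CTXT */
};

static struct svc_rdma_op_ctxt *get_context(void)
{
	return calloc(1, sizeof(struct svc_rdma_op_ctxt));
}

/* The "DTO path": every READ context is freed inline as its work
 * request completes; only the last one hands the header context on
 * for RPC processing, via read_hdr rather than a chained list. */
static void read_completed(struct svc_rdma_op_ctxt *ctxt,
			   struct svc_rdma_op_ctxt **rpc_out)
{
	if (ctxt->last_ctxt)
		*rpc_out = ctxt->read_hdr;	/* enqueue the deferred RPC */
	free(ctxt);				/* cleaned up inline, not deferred */
}

int main(void)
{
	struct svc_rdma_op_ctxt *hdr = get_context();	/* maps the RPC's pages */
	struct svc_rdma_op_ctxt *rpc = NULL;

	/* Simulate three RDMA_READ completions: no list is built; the
	 * final context alone records where the RPC's page list lives. */
	for (int i = 0; i < 3; i++) {
		struct svc_rdma_op_ctxt *ctxt = get_context();
		if (i == 2) {
			ctxt->last_ctxt = 1;
			ctxt->read_hdr = hdr;
		}
		read_completed(ctxt, &rpc);
	}
	printf("deferred RPC header ctxt: %p\n", (void *)rpc);
	free(hdr);
	return 0;
}

Because intermediate contexts never point at one another, an error midway no longer requires breaking a circular list before freeing, which is exactly the error-path simplification visible in the hunks below.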
@@ -71,6 +71,7 @@ extern atomic_t rdma_stat_sq_prod;
  * completes.
  */
 struct svc_rdma_op_ctxt {
+	struct svc_rdma_op_ctxt *read_hdr;
 	struct svc_rdma_op_ctxt *next;
 	struct xdr_buf arg;
 	struct list_head dto_q;
@@ -289,7 +289,6 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	u64 sgl_offset;
 	struct rpcrdma_read_chunk *ch;
 	struct svc_rdma_op_ctxt *ctxt = NULL;
-	struct svc_rdma_op_ctxt *head;
 	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
 	struct svc_rdma_op_ctxt *tmp_ch_ctxt;
 	struct chunk_sge *ch_sge_ary;
@@ -310,20 +309,13 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
 				    sge, ch_sge_ary,
 				    ch_count, byte_count);
-	head = svc_rdma_get_context(xprt);
 	sgl_offset = 0;
 	ch_no = 0;
 
 	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	     ch->rc_discrim != 0; ch++, ch_no++) {
 next_sge:
-		if (!ctxt)
-			ctxt = head;
-		else {
-			ctxt->next = svc_rdma_get_context(xprt);
-			ctxt = ctxt->next;
-		}
-		ctxt->next = NULL;
+		ctxt = svc_rdma_get_context(xprt);
 		ctxt->direction = DMA_FROM_DEVICE;
 		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
@@ -351,20 +343,15 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 		 * the client and the RPC needs to be enqueued.
 		 */
 		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
-		ctxt->next = hdr_ctxt;
-		hdr_ctxt->next = head;
+		ctxt->read_hdr = hdr_ctxt;
 	}
 	/* Post the read */
 	err = svc_rdma_send(xprt, &read_wr);
 	if (err) {
-		printk(KERN_ERR "svcrdma: Error posting send = %d\n",
+		printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
 		       err);
-		/*
-		 * Break the circular list so free knows when
-		 * to stop if the error happened to occur on
-		 * the last read
-		 */
-		ctxt->next = NULL;
+		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+		svc_rdma_put_context(ctxt, 0);
 		goto out;
 	}
 	atomic_inc(&rdma_stat_read);
@@ -375,7 +362,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 			goto next_sge;
 		}
 		sgl_offset = 0;
-		err = 0;
+		err = 1;
 	}
 
 out:
@@ -393,25 +380,12 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	while (rqstp->rq_resused)
 		rqstp->rq_respages[--rqstp->rq_resused] = NULL;
 
-	if (err) {
-		printk(KERN_ERR "svcrdma : RDMA_READ error = %d\n", err);
-		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
-		/* Free the linked list of read contexts */
-		while (head != NULL) {
-			ctxt = head->next;
-			svc_rdma_put_context(head, 1);
-			head = ctxt;
-		}
-		return err;
-	}
-
-	return 1;
+	return err;
 }
 
 static int rdma_read_complete(struct svc_rqst *rqstp,
-			      struct svc_rdma_op_ctxt *data)
+			      struct svc_rdma_op_ctxt *head)
 {
-	struct svc_rdma_op_ctxt *head = data->next;
 	int page_no;
 	int ret;
@@ -437,22 +411,12 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 	rqstp->rq_arg.len = head->arg.len;
 	rqstp->rq_arg.buflen = head->arg.buflen;
+	/* Free the context */
+	svc_rdma_put_context(head, 0);
 
 	/* XXX: What should this be? */
 	rqstp->rq_prot = IPPROTO_MAX;
 
-	/*
-	 * Free the contexts we used to build the RDMA_READ. We have
-	 * to be careful here because the context list uses the same
-	 * next pointer used to chain the contexts associated with the
-	 * RDMA_READ
-	 */
-	data->next = NULL;	/* terminate circular list */
-	do {
-		data = head->next;
-		svc_rdma_put_context(head, 0);
-		head = data;
-	} while (head != NULL);
-
 	ret = rqstp->rq_arg.head[0].iov_len
 		+ rqstp->rq_arg.page_len
 		+ rqstp->rq_arg.tail[0].iov_len;
@@ -352,13 +352,16 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
 	case IB_WR_RDMA_READ:
 		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
+			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
+			BUG_ON(!read_hdr);
 			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 			spin_lock_bh(&xprt->sc_read_complete_lock);
-			list_add_tail(&ctxt->dto_q,
+			list_add_tail(&read_hdr->dto_q,
 				      &xprt->sc_read_complete_q);
 			spin_unlock_bh(&xprt->sc_read_complete_lock);
 			svc_xprt_enqueue(&xprt->sc_xprt);
 		}
+		svc_rdma_put_context(ctxt, 0);
 		break;
 	default:
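
A note on the return-value change visible in the rdma_read_xdr() hunks: with the list-unwinding gone, the function can report its outcome directly. Setting err = 1 marks the success case ("reads posted, RPC deferred"), while the negative value from svc_rdma_send() propagates on failure. The following control-flow sketch illustrates that shape only; it is not the kernel source, and post_one_read() is a hypothetical stand-in for posting the RDMA_READ work requests via svc_rdma_send().

/* Hypothetical stand-in for the posting step; here it always succeeds. */
static int post_one_read(void)
{
	return 0;	/* 0 = posted OK, negative errno on failure */
}

static int rdma_read_xdr_shape(void)
{
	int err;

	err = post_one_read();
	if (err)
		/* Failure: close the transport and release only the
		 * current context; there is no chained list to unwind. */
		goto out;
	err = 1;	/* success: reads posted, RPC completion deferred */
out:
	return err;	/* 1 = deferred; <0 = posting error */
}

int main(void)
{
	return rdma_read_xdr_shape() == 1 ? 0 : 1;
}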