Commit ae225fe2 authored by Chuck Lever

svcrdma: Add an async version of svc_rdma_send_ctxt_put()

DMA unmapping can take quite some time, so it should not be handled
in a single-threaded completion handler. Defer releasing send_ctxts
to the recently-added workqueue.

With this patch, DMA unmapping can be handled in parallel, and it
does not cause head-of-queue blocking of Send completions.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 9c7e1a06
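The deferral mechanism the patch uses is the kernel's standard work item API: the completion path only queues a work item, and the expensive unmap/free work runs later in process context on the workqueue added by the earlier patch. Below is a minimal sketch of that pattern; the names my_wq, my_ctxt, my_release(), and my_put() are hypothetical stand-ins for illustration only, while the patch's actual symbols appear in the diff that follows.

/*
 * Sketch only: hand an expensive cleanup off to a workqueue so the
 * completion handler returns quickly. All "my_*" names are made up.
 */
#include <linux/workqueue.h>
#include <linux/container_of.h>

/* Created elsewhere, e.g. alloc_workqueue("my_wq", WQ_UNBOUND, 0). */
static struct workqueue_struct *my_wq;

struct my_ctxt {
	struct work_struct	work;	/* embedded work item */
	/* ... DMA mappings, SGEs, pages ... */
};

/* Slow path: DMA unmapping, page release, return to a free list. */
static void my_release(struct my_ctxt *ctxt)
{
}

/* Runs later in process context; many instances can run in parallel. */
static void my_put_async(struct work_struct *work)
{
	struct my_ctxt *ctxt = container_of(work, struct my_ctxt, work);

	my_release(ctxt);
}

/* Called from the single-threaded Send completion handler. */
static void my_put(struct my_ctxt *ctxt)
{
	INIT_WORK(&ctxt->work, my_put_async);
	queue_work(my_wq, &ctxt->work);
}

Queuing to an unbound workqueue (assuming the earlier patch created one) lets the scheduler spread the unmap work across CPUs, which is what makes the parallel DMA unmapping claimed above possible.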
@@ -152,7 +152,9 @@ struct svc_rdma_recv_ctxt {
 struct svc_rdma_send_ctxt {
 	struct llist_node	sc_node;
 	struct rpc_rdma_cid	sc_cid;
+	struct work_struct	sc_work;
 
+	struct svcxprt_rdma	*sc_rdma;
 	struct ib_send_wr	sc_send_wr;
 	struct ib_cqe		sc_cqe;
 	struct xdr_buf		sc_hdrbuf;
...
@@ -143,6 +143,7 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 
 	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
 
+	ctxt->sc_rdma = rdma;
 	ctxt->sc_send_wr.next = NULL;
 	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
 	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
@@ -223,15 +224,8 @@ struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
 	goto out;
 }
 
-/**
- * svc_rdma_send_ctxt_put - Return send_ctxt to free list
- * @rdma: controlling svcxprt_rdma
- * @ctxt: object to return to the free list
- *
- * Pages left in sc_pages are DMA unmapped and released.
- */
-void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
-			    struct svc_rdma_send_ctxt *ctxt)
+static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
+				       struct svc_rdma_send_ctxt *ctxt)
 {
 	struct ib_device *device = rdma->sc_cm_id->device;
 	unsigned int i;
@@ -255,6 +249,28 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
 	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
 }
 
+static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
+{
+	struct svc_rdma_send_ctxt *ctxt;
+
+	ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
+	svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
+}
+
+/**
+ * svc_rdma_send_ctxt_put - Return send_ctxt to free list
+ * @rdma: controlling svcxprt_rdma
+ * @ctxt: object to return to the free list
+ *
+ * Pages left in sc_pages are DMA unmapped and released.
+ */
+void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
+			    struct svc_rdma_send_ctxt *ctxt)
+{
+	INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
+	queue_work(svcrdma_wq, &ctxt->sc_work);
+}
+
 /**
  * svc_rdma_wake_send_waiters - manage Send Queue accounting
  * @rdma: controlling transport
...