Commit 1c00dd07 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Reduce calls to ib_poll_cq() in completion handlers

Change the completion handlers to grab up to 16 items per
ib_poll_cq() call. No extra ib_poll_cq() is needed if fewer than 16
items are returned.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 7f23f6f6
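
For context, the change boils down to the batched polling loop sketched below. This is only an illustrative sketch, not the kernel code: POLLSIZE, poll_cq_batched() and process_wc() are placeholder names standing in for RPCRDMA_POLLSIZE and the rpcrdma_{send,recv}cq_poll()/_process_wc() helpers in the diff that follows.

/*
 * Sketch of the batched completion-polling pattern (placeholder names,
 * not the kernel identifiers). Poll up to POLLSIZE completions per
 * ib_poll_cq() call; only a completely full batch triggers another
 * poll, so a partial batch needs no extra ib_poll_cq().
 */
#include <rdma/ib_verbs.h>

#define POLLSIZE 16                     /* mirrors RPCRDMA_POLLSIZE */

static void process_wc(struct ib_wc *wc)
{
        /* placeholder for per-completion handling */
}

static int poll_cq_batched(struct ib_cq *cq, struct ib_wc *wc_array)
{
        struct ib_wc *wcs;
        int count, rc;

        do {
                wcs = wc_array;         /* reuse the pre-allocated array */

                rc = ib_poll_cq(cq, POLLSIZE, wcs);
                if (rc <= 0)
                        return rc;      /* 0: CQ drained; < 0: error */

                count = rc;
                while (count-- > 0)
                        process_wc(wcs++);
        } while (rc == POLLSIZE);       /* full batch: more may be queued */

        return 0;
}

In the actual patch the work-completion array lives in struct rpcrdma_ep, so the completion handlers avoid a large on-stack buffer as well as the per-item polling.
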
@@ -162,14 +162,23 @@ rpcrdma_sendcq_process_wc(struct ib_wc *wc)
 }
 
 static int
-rpcrdma_sendcq_poll(struct ib_cq *cq)
+rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 {
-        struct ib_wc wc;
-        int rc;
+        struct ib_wc *wcs;
+        int count, rc;
 
-        while ((rc = ib_poll_cq(cq, 1, &wc)) == 1)
-                rpcrdma_sendcq_process_wc(&wc);
-        return rc;
+        do {
+                wcs = ep->rep_send_wcs;
+
+                rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
+                if (rc <= 0)
+                        return rc;
+
+                count = rc;
+                while (count-- > 0)
+                        rpcrdma_sendcq_process_wc(wcs++);
+        } while (rc == RPCRDMA_POLLSIZE);
+        return 0;
 }
 
 /*
@@ -183,9 +192,10 @@ rpcrdma_sendcq_poll(struct ib_cq *cq)
 static void
 rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
 {
+        struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
         int rc;
 
-        rc = rpcrdma_sendcq_poll(cq);
+        rc = rpcrdma_sendcq_poll(cq, ep);
         if (rc) {
                 dprintk("RPC: %s: ib_poll_cq failed: %i\n",
                         __func__, rc);
@@ -202,7 +212,7 @@ rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
                 return;
         }
 
-        rpcrdma_sendcq_poll(cq);
+        rpcrdma_sendcq_poll(cq, ep);
 }
 
 static void
@@ -241,14 +251,23 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc)
 }
 
 static int
-rpcrdma_recvcq_poll(struct ib_cq *cq)
+rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 {
-        struct ib_wc wc;
-        int rc;
+        struct ib_wc *wcs;
+        int count, rc;
 
-        while ((rc = ib_poll_cq(cq, 1, &wc)) == 1)
-                rpcrdma_recvcq_process_wc(&wc);
-        return rc;
+        do {
+                wcs = ep->rep_recv_wcs;
+
+                rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
+                if (rc <= 0)
+                        return rc;
+
+                count = rc;
+                while (count-- > 0)
+                        rpcrdma_recvcq_process_wc(wcs++);
+        } while (rc == RPCRDMA_POLLSIZE);
+        return 0;
 }
 
 /*
@@ -266,9 +285,10 @@ rpcrdma_recvcq_poll(struct ib_cq *cq)
 static void
 rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
 {
+        struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
         int rc;
 
-        rc = rpcrdma_recvcq_poll(cq);
+        rc = rpcrdma_recvcq_poll(cq, ep);
         if (rc) {
                 dprintk("RPC: %s: ib_poll_cq failed: %i\n",
                         __func__, rc);
@@ -285,7 +305,7 @@ rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
                 return;
         }
 
-        rpcrdma_recvcq_poll(cq);
+        rpcrdma_recvcq_poll(cq, ep);
 }
 
 #ifdef RPC_DEBUG
@@ -721,7 +741,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
         INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
         sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
-                                  rpcrdma_cq_async_error_upcall, NULL,
+                                  rpcrdma_cq_async_error_upcall, ep,
                                   ep->rep_attr.cap.max_send_wr + 1, 0);
         if (IS_ERR(sendcq)) {
                 rc = PTR_ERR(sendcq);
@@ -738,7 +758,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
         }
 
         recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
-                                  rpcrdma_cq_async_error_upcall, NULL,
+                                  rpcrdma_cq_async_error_upcall, ep,
                                   ep->rep_attr.cap.max_recv_wr + 1, 0);
         if (IS_ERR(recvcq)) {
                 rc = PTR_ERR(recvcq);
...
@@ -74,6 +74,8 @@ struct rpcrdma_ia {
  * RDMA Endpoint -- one per transport instance
  */
 
+#define RPCRDMA_POLLSIZE        (16)
+
 struct rpcrdma_ep {
         atomic_t                rep_cqcount;
         int                     rep_cqinit;
@@ -88,6 +90,8 @@ struct rpcrdma_ep {
         struct rdma_conn_param  rep_remote_cma;
         struct sockaddr_storage rep_remote_addr;
         struct delayed_work     rep_connect_worker;
+        struct ib_wc            rep_send_wcs[RPCRDMA_POLLSIZE];
+        struct ib_wc            rep_recv_wcs[RPCRDMA_POLLSIZE];
 };
 
 #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
...