Commit 6f0afc28 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Remove atomic send completion counting

The sendctx circular queue now guarantees that xprtrdma cannot
overflow the Send Queue, so remove the remaining bits of the
original Send WQE counting mechanism.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 01bb35c8
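
For context: the mechanism removed below kept an atomic countdown (rep_cqcount, seeded from rep_cqinit at roughly half the Send Queue depth) and marked a Send WR as signaled whenever the counter ran out, so that a completion would periodically refresh the Send Queue accounting. Its replacement is already visible in the surviving context lines: a plain rep_send_count/rep_send_batch pair, which needs no atomics because the sendctx circular queue serializes Send WR posting and bounds how many can be outstanding. Below is a minimal sketch of that batch-signaling idea; the names and struct are simplified illustrations, not the kernel's exact code.

	#include <rdma/ib_verbs.h>

	/* Sketch: signal one Send WR per batch instead of counting
	 * down with an atomic. Assumes the caller serializes posting
	 * (as the sendctx circular queue does), so a plain unsigned
	 * int suffices where an atomic_t was needed before.
	 */
	struct ep_state {
		unsigned int send_count;	/* sends left before next signal */
		unsigned int send_batch;	/* reset value for send_count */
	};

	static void set_send_flags(struct ep_state *ep, struct ib_send_wr *wr)
	{
		if (ep->send_count == 0) {
			/* One signaled WR per batch keeps Send Queue
			 * accounting current without a completion per WR.
			 */
			wr->send_flags = IB_SEND_SIGNALED;
			ep->send_count = ep->send_batch;
		} else {
			wr->send_flags = 0;
			ep->send_count--;
		}
	}

Signaling one WR per batch amortizes completion handling across many sends, while the circular queue itself provides the overflow protection that the atomic counter used to approximate.
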
net/sunrpc/xprtrdma/frwr_ops.c

@@ -419,7 +419,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 			    IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 			    IB_ACCESS_REMOTE_READ;
 
-	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
 	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
 	if (rc)
 		goto out_senderr;
@@ -507,12 +506,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	f->fr_cqe.done = frwr_wc_localinv_wake;
 	reinit_completion(&f->fr_linv_done);
 
-	/* Initialize CQ count, since there is always a signaled
-	 * WR being posted here.  The new cqcount depends on how
-	 * many SQEs are about to be consumed.
-	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
-
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless ri_id->qp is a valid pointer.
@@ -545,7 +538,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	/* Find and reset the MRs in the LOCAL_INV WRs that did not
 	 * get posted.
 	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
 	while (bad_wr) {
 		f = container_of(bad_wr, struct rpcrdma_frmr,
 				 fr_invwr);
net/sunrpc/xprtrdma/verbs.c
@@ -553,10 +553,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
 				   cdata->max_requests >> 2);
 	ep->rep_send_count = ep->rep_send_batch;
-	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-	if (ep->rep_cqinit <= 2)
-		ep->rep_cqinit = 0;	/* always signal? */
-	rpcrdma_init_cqcount(ep, 0);
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
net/sunrpc/xprtrdma/xprt_rdma.h
@@ -95,8 +95,6 @@ enum {
 struct rpcrdma_ep {
 	unsigned int		rep_send_count;
 	unsigned int		rep_send_batch;
-	atomic_t		rep_cqcount;
-	int			rep_cqinit;
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t	rep_connect_wait;
@@ -106,25 +104,6 @@ struct rpcrdma_ep {
 	struct delayed_work	rep_connect_worker;
 };
 
-static inline void
-rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
-{
-	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
-}
-
-/* To update send queue accounting, provider must take a
- * send completion every now and then.
- */
-static inline void
-rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
-{
-	send_wr->send_flags = 0;
-	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
-		rpcrdma_init_cqcount(ep, 0);
-		send_wr->send_flags = IB_SEND_SIGNALED;
-	}
-}
-
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
  * allocated when the forward channel is set up.