Commit 23262790 authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Clean up Send SGE accounting

Clean up: Since there's already a svc_rdma_op_ctxt being passed
around with the running count of mapped SGEs, drop unneeded
parameters to svc_rdma_post_send_wr().
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent f016f305
...@@ -188,7 +188,7 @@ extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, ...@@ -188,7 +188,7 @@ extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
__be32 *rdma_resp, unsigned int len); __be32 *rdma_resp, unsigned int len);
extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
struct svc_rdma_op_ctxt *ctxt, struct svc_rdma_op_ctxt *ctxt,
int num_sge, u32 inv_rkey); u32 inv_rkey);
extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_sendto(struct svc_rqst *);
/* svc_rdma_transport.c */ /* svc_rdma_transport.c */
......
...@@ -135,7 +135,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, ...@@ -135,7 +135,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
* the rq_buffer before all retransmits are complete. * the rq_buffer before all retransmits are complete.
*/ */
get_page(virt_to_page(rqst->rq_buffer)); get_page(virt_to_page(rqst->rq_buffer));
ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0); ret = svc_rdma_post_send_wr(rdma, ctxt, 0);
if (ret) if (ret)
goto out_unmap; goto out_unmap;
......
...@@ -639,7 +639,7 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, ...@@ -639,7 +639,7 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
return; return;
} }
ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0); ret = svc_rdma_post_send_wr(xprt, ctxt, 0);
if (ret) { if (ret) {
svc_rdma_unmap_dma(ctxt); svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1); svc_rdma_put_context(ctxt, 1);
......
...@@ -365,8 +365,7 @@ int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, ...@@ -365,8 +365,7 @@ int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
/* Load the xdr_buf into the ctxt's sge array, and DMA map each /* Load the xdr_buf into the ctxt's sge array, and DMA map each
* element as it is added. * element as it is added.
* *
* Returns the number of sge elements loaded on success, or * Returns zero on success, or a negative errno on failure.
* a negative errno on failure.
*/ */
static int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, static int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_op_ctxt *ctxt, struct svc_rdma_op_ctxt *ctxt,
...@@ -429,7 +428,7 @@ static int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, ...@@ -429,7 +428,7 @@ static int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
return ret; return ret;
} }
return sge_no - 1; return 0;
} }
/* The svc_rqst and all resources it owns are released as soon as /* The svc_rqst and all resources it owns are released as soon as
...@@ -453,7 +452,6 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, ...@@ -453,7 +452,6 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
* svc_rdma_post_send_wr - Set up and post one Send Work Request * svc_rdma_post_send_wr - Set up and post one Send Work Request
* @rdma: controlling transport * @rdma: controlling transport
* @ctxt: op_ctxt for transmitting the Send WR * @ctxt: op_ctxt for transmitting the Send WR
* @num_sge: number of SGEs to send
* @inv_rkey: R_key argument to Send With Invalidate, or zero * @inv_rkey: R_key argument to Send With Invalidate, or zero
* *
* Returns: * Returns:
...@@ -463,18 +461,19 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, ...@@ -463,18 +461,19 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
* %-ENOMEM if ib_post_send failed. * %-ENOMEM if ib_post_send failed.
*/ */
int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
struct svc_rdma_op_ctxt *ctxt, int num_sge, struct svc_rdma_op_ctxt *ctxt,
u32 inv_rkey) u32 inv_rkey)
{ {
struct ib_send_wr *send_wr = &ctxt->send_wr; struct ib_send_wr *send_wr = &ctxt->send_wr;
dprintk("svcrdma: posting Send WR with %u sge(s)\n", num_sge); dprintk("svcrdma: posting Send WR with %u sge(s)\n",
ctxt->mapped_sges);
send_wr->next = NULL; send_wr->next = NULL;
ctxt->cqe.done = svc_rdma_wc_send; ctxt->cqe.done = svc_rdma_wc_send;
send_wr->wr_cqe = &ctxt->cqe; send_wr->wr_cqe = &ctxt->cqe;
send_wr->sg_list = ctxt->sge; send_wr->sg_list = ctxt->sge;
send_wr->num_sge = num_sge; send_wr->num_sge = ctxt->mapped_sges;
send_wr->send_flags = IB_SEND_SIGNALED; send_wr->send_flags = IB_SEND_SIGNALED;
if (inv_rkey) { if (inv_rkey) {
send_wr->opcode = IB_WR_SEND_WITH_INV; send_wr->opcode = IB_WR_SEND_WITH_INV;
...@@ -532,7 +531,7 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, ...@@ -532,7 +531,7 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
inv_rkey = 0; inv_rkey = 0;
if (rdma->sc_snd_w_inv) if (rdma->sc_snd_w_inv)
inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_lst, rp_ch); inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_lst, rp_ch);
ret = svc_rdma_post_send_wr(rdma, ctxt, 1 + ret, inv_rkey); ret = svc_rdma_post_send_wr(rdma, ctxt, inv_rkey);
if (ret) if (ret)
goto err; goto err;
...@@ -574,7 +573,7 @@ static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, ...@@ -574,7 +573,7 @@ static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
svc_rdma_save_io_pages(rqstp, ctxt); svc_rdma_save_io_pages(rqstp, ctxt);
ret = svc_rdma_post_send_wr(rdma, ctxt, 1 + ret, 0); ret = svc_rdma_post_send_wr(rdma, ctxt, 0);
if (ret) if (ret)
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment