Commit 25594290 authored by Steve Wise, committed by J. Bruce Fields

svcrdma: send_write() must not overflow the device's max sge

Function send_write() must stop creating sges when it reaches the device
max and return the amount sent in the RDMA Write to the caller.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent a46cb7f2
...@@ -192,6 +192,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, ...@@ -192,6 +192,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
xdr_sge_no++; xdr_sge_no++;
BUG_ON(xdr_sge_no > vec->count); BUG_ON(xdr_sge_no > vec->count);
bc -= sge_bytes; bc -= sge_bytes;
if (sge_no == xprt->sc_max_sge)
break;
} }
/* Prepare WRITE WR */ /* Prepare WRITE WR */
...@@ -209,7 +211,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, ...@@ -209,7 +211,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
atomic_inc(&rdma_stat_write); atomic_inc(&rdma_stat_write);
if (svc_rdma_send(xprt, &write_wr)) if (svc_rdma_send(xprt, &write_wr))
goto err; goto err;
return 0; return write_len - bc;
err: err:
svc_rdma_unmap_dma(ctxt); svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 0); svc_rdma_put_context(ctxt, 0);
...@@ -225,7 +227,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt, ...@@ -225,7 +227,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
{ {
u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
int write_len; int write_len;
int max_write;
u32 xdr_off; u32 xdr_off;
int chunk_off; int chunk_off;
int chunk_no; int chunk_no;
...@@ -239,8 +240,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt, ...@@ -239,8 +240,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
res_ary = (struct rpcrdma_write_array *) res_ary = (struct rpcrdma_write_array *)
&rdma_resp->rm_body.rm_chunks[1]; &rdma_resp->rm_body.rm_chunks[1];
max_write = xprt->sc_max_sge * PAGE_SIZE;
/* Write chunks start at the pagelist */ /* Write chunks start at the pagelist */
for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
xfer_len && chunk_no < arg_ary->wc_nchunks; xfer_len && chunk_no < arg_ary->wc_nchunks;
...@@ -260,23 +259,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt, ...@@ -260,23 +259,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
write_len); write_len);
chunk_off = 0; chunk_off = 0;
while (write_len) { while (write_len) {
int this_write;
this_write = min(write_len, max_write);
ret = send_write(xprt, rqstp, ret = send_write(xprt, rqstp,
ntohl(arg_ch->rs_handle), ntohl(arg_ch->rs_handle),
rs_offset + chunk_off, rs_offset + chunk_off,
xdr_off, xdr_off,
this_write, write_len,
vec); vec);
if (ret) { if (ret <= 0) {
dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
ret); ret);
return -EIO; return -EIO;
} }
chunk_off += this_write; chunk_off += ret;
xdr_off += this_write; xdr_off += ret;
xfer_len -= this_write; xfer_len -= ret;
write_len -= this_write; write_len -= ret;
} }
} }
/* Update the req with the number of chunks actually used */ /* Update the req with the number of chunks actually used */
...@@ -293,7 +290,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, ...@@ -293,7 +290,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
{ {
u32 xfer_len = rqstp->rq_res.len; u32 xfer_len = rqstp->rq_res.len;
int write_len; int write_len;
int max_write;
u32 xdr_off; u32 xdr_off;
int chunk_no; int chunk_no;
int chunk_off; int chunk_off;
...@@ -311,8 +307,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, ...@@ -311,8 +307,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
res_ary = (struct rpcrdma_write_array *) res_ary = (struct rpcrdma_write_array *)
&rdma_resp->rm_body.rm_chunks[2]; &rdma_resp->rm_body.rm_chunks[2];
max_write = xprt->sc_max_sge * PAGE_SIZE;
/* xdr offset starts at RPC message */ /* xdr offset starts at RPC message */
nchunks = ntohl(arg_ary->wc_nchunks); nchunks = ntohl(arg_ary->wc_nchunks);
for (xdr_off = 0, chunk_no = 0; for (xdr_off = 0, chunk_no = 0;
...@@ -330,24 +324,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, ...@@ -330,24 +324,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
write_len); write_len);
chunk_off = 0; chunk_off = 0;
while (write_len) { while (write_len) {
int this_write;
this_write = min(write_len, max_write);
ret = send_write(xprt, rqstp, ret = send_write(xprt, rqstp,
ntohl(ch->rs_handle), ntohl(ch->rs_handle),
rs_offset + chunk_off, rs_offset + chunk_off,
xdr_off, xdr_off,
this_write, write_len,
vec); vec);
if (ret) { if (ret <= 0) {
dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
ret); ret);
return -EIO; return -EIO;
} }
chunk_off += this_write; chunk_off += ret;
xdr_off += this_write; xdr_off += ret;
xfer_len -= this_write; xfer_len -= ret;
write_len -= this_write; write_len -= ret;
} }
} }
/* Update the req with the number of chunks actually used */ /* Update the req with the number of chunks actually used */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment