Commit 857f9aca authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Change return value of rpcrdma_prepare_send_sges()

Clean up: Make rpcrdma_prepare_send_sges() return a negative errno
instead of a bool. Soon callers will want distinct treatments of
different types of failures.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 394b2c77
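
The patch below is a calling-convention cleanup: rpcrdma_prepare_send_sges() now reports success as 0 and failure as a negative errno instead of a bool, and callers propagate that value. A minimal, self-contained sketch of the pattern (toy function and variable names for illustration, not the kernel code):

    #include <errno.h>
    #include <stdio.h>

    /* Toy stand-in for the new rpcrdma_prepare_send_sges() convention:
     * return 0 on success and a negative errno on failure, not a bool.
     */
    static int prepare_send_sges(int dma_map_ok)
    {
            if (!dma_map_ok)
                    return -EIO;    /* later patches can return other errnos */
            return 0;
    }

    int main(void)
    {
            /* Callers no longer invent their own errno; they propagate the
             * helper's return value, so the failure type is preserved.
             */
            int rc = prepare_send_sges(0);

            if (rc)
                    printf("marshal failed: %d\n", rc);
            return 0;
    }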
@@ -222,8 +222,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
         *p++ = xdr_zero;
         *p = xdr_zero;
 
-        if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
-                                       &rqst->rq_snd_buf, rpcrdma_noch))
+        if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
+                                      &rqst->rq_snd_buf, rpcrdma_noch))
                 return -EIO;
         return 0;
 }
@@ -544,7 +544,7 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 
         if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
                 if (!__rpcrdma_dma_map_regbuf(ia, rb))
-                        return false;
+                        goto out_regbuf;
                 sge->addr = rdmab_addr(rb);
                 sge->lkey = rdmab_lkey(rb);
         }
@@ -554,6 +554,10 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                                       sge->length, DMA_TO_DEVICE);
         req->rl_send_wr.num_sge++;
         return true;
+
+out_regbuf:
+        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+        return false;
 }
 
 /* Prepare the Send SGEs. The head and tail iovec, and each entry
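
The out_regbuf exits added above use the common kernel goto-to-a-single-error-label idiom, so the DMA-mapping failure is reported once inside the function that detected it rather than by its caller. A generic, self-contained sketch of the idiom (toy names, used here only for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy illustration of a single goto error exit: the failure path
     * funnels through one label that owns the error report.
     */
    static bool prepare_buffer(bool map_ok)
    {
            if (!map_ok)
                    goto out_unmapped;

            /* normal setup work would happen here */
            return true;

    out_unmapped:
            fprintf(stderr, "failed to DMA map a Send buffer\n");
            return false;
    }

    int main(void)
    {
            return prepare_buffer(false) ? 0 : 1;
    }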
@@ -574,7 +578,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
          * DMA-mapped. Sync the content that has changed.
          */
         if (!rpcrdma_dma_map_regbuf(ia, rb))
-                return false;
+                goto out_regbuf;
         sge_no = 1;
         sge[sge_no].addr = rdmab_addr(rb);
         sge[sge_no].length = xdr->head[0].iov_len;
@@ -662,6 +666,10 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
         req->rl_send_wr.num_sge += sge_no;
         return true;
 
+out_regbuf:
+        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+        return false;
+
 out_mapping_overflow:
         rpcrdma_unmap_sges(ia, req);
         pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
@@ -673,26 +681,32 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
         return false;
 }
 
-bool
-rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
-                          u32 hdrlen, struct xdr_buf *xdr,
-                          enum rpcrdma_chunktype rtype)
+/**
+ * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
+ * @r_xprt: controlling transport
+ * @req: context of RPC Call being marshalled
+ * @hdrlen: size of transport header, in bytes
+ * @xdr: xdr_buf containing RPC Call
+ * @rtype: chunk type being encoded
+ *
+ * Returns 0 on success; otherwise a negative errno is returned.
+ */
+int
+rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
+                          struct rpcrdma_req *req, u32 hdrlen,
+                          struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
 {
         req->rl_send_wr.num_sge = 0;
         req->rl_mapped_sges = 0;
 
-        if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
-                goto out_map;
+        if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
+                return -EIO;
 
         if (rtype != rpcrdma_areadch)
-                if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
-                        goto out_map;
+                if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
+                        return -EIO;
 
-        return true;
-
-out_map:
-        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
-        return false;
+        return 0;
 }
 
 /**
@@ -843,12 +857,10 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
                 transfertypes[rtype], transfertypes[wtype],
                 xdr_stream_pos(xdr));
 
-        if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
-                                       xdr_stream_pos(xdr),
-                                       &rqst->rq_snd_buf, rtype)) {
-                ret = -EIO;
+        ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
+                                        &rqst->rq_snd_buf, rtype);
+        if (ret)
                 goto out_err;
-        }
         return 0;
 
 out_err:
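
Note that this patch itself still maps every failure to -EIO; the "distinct treatments of different types of failures" mentioned in the commit message are left for later work. A hypothetical sketch of what such caller-side handling could look like (the -ENOBUFS case and the retry idea are illustrative assumptions, not part of this patch):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical caller once distinct errnos are returned; the helper
     * and its -ENOBUFS result are illustrative stand-ins only.
     */
    static int prepare_send_sges(void)
    {
            return -ENOBUFS;        /* e.g. a transient resource shortage */
    }

    static int marshal_req(void)
    {
            int rc = prepare_send_sges();

            switch (rc) {
            case 0:
                    return 0;
            case -ENOBUFS:
                    /* transient: the request could be queued and retried */
                    printf("no send buffers, retry later\n");
                    return rc;
            default:
                    /* hard failure such as -EIO: fail the RPC */
                    printf("marshaling failed: %d\n", rc);
                    return rc;
            }
    }

    int main(void)
    {
            return marshal_req() ? 1 : 0;
    }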
@@ -613,8 +613,10 @@ enum rpcrdma_chunktype {
         rpcrdma_replych
 };
 
-bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
-                               u32, struct xdr_buf *, enum rpcrdma_chunktype);
+int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
+                              struct rpcrdma_req *req, u32 hdrlen,
+                              struct xdr_buf *xdr,
+                              enum rpcrdma_chunktype rtype);
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);