Commit f6763c29 authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Do not send Write chunk XDR pad with inline content

The NFS server's XDR encoders add an XDR pad for content in the
xdr_buf page list at the beginning of the xdr_buf's tail buffer.

On RDMA transports, Write chunks are sent separately and without an
XDR pad.

If a Write chunk is being sent, strip off the pad in the tail buffer
so that inline content following the Write chunk remains XDR-aligned
when it is sent to the client.
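
For reference, the pad in question is the 0-3 zero bytes that bring the page-list content up to the next XDR quad (4-byte) boundary; the transport derives it with xdr_padsize(), which appears in the hunk context below. A minimal sketch of that calculation, assuming the usual 4-byte XDR alignment rule:

	/* Bytes of pad needed to round len up to the next 4-byte boundary */
	static u32 xdr_padsize(u32 len)
	{
		return (len & 3) ? (4 - (len & 3)) : 0;
	}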

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=294
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent cf570a93
include/linux/sunrpc/svc_rdma.h

@@ -224,7 +224,7 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 
 /* svc_rdma_sendto.c */
 extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
-			    struct svc_rdma_req_map *);
+			    struct svc_rdma_req_map *, bool);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern struct rpcrdma_read_chunk *
 	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
net/sunrpc/xprtrdma/svc_rdma_backchannel.c

@@ -107,7 +107,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	int ret;
 
 	vec = svc_rdma_get_req_map(rdma);
-	ret = svc_rdma_map_xdr(rdma, sndbuf, vec);
+	ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
 	if (ret)
 		goto out_err;
net/sunrpc/xprtrdma/svc_rdma_sendto.c

@@ -57,7 +57,8 @@ static u32 xdr_padsize(u32 len)
 
 int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
 		     struct xdr_buf *xdr,
-		     struct svc_rdma_req_map *vec)
+		     struct svc_rdma_req_map *vec,
+		     bool write_chunk_present)
 {
 	int sge_no;
 	u32 sge_bytes;
@@ -97,9 +98,20 @@ int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
 
 	/* Tail SGE */
 	if (xdr->tail[0].iov_len) {
-		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
-		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
-		sge_no++;
+		unsigned char *base = xdr->tail[0].iov_base;
+		size_t len = xdr->tail[0].iov_len;
+		u32 xdr_pad = xdr_padsize(xdr->page_len);
+
+		if (write_chunk_present && xdr_pad) {
+			base += xdr_pad;
+			len -= xdr_pad;
+		}
+
+		if (len) {
+			vec->sge[sge_no].iov_base = base;
+			vec->sge[sge_no].iov_len = len;
+			sge_no++;
+		}
 	}
 
 	dprintk("svcrdma: %s: sge_no %d page_no %d "
@@ -594,7 +606,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	ctxt = svc_rdma_get_context(rdma);
 	ctxt->direction = DMA_TO_DEVICE;
 	vec = svc_rdma_get_req_map(rdma);
-	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec);
+	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
 	if (ret)
 		goto err0;
 	inline_bytes = rqstp->rq_res.len;