Commit 94f58c58 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Allow Read list and Reply chunk simultaneously

rpcrdma_marshal_req() makes a simplifying assumption: that NFS
operations with large Call messages have small Reply messages, and
vice versa. Therefore with RPC-over-RDMA, only one chunk type is
ever needed for each Call/Reply pair: when one direction needs
chunks, the other will always fit inline.

In fact, this assumption is asserted in the code:

  if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
  	dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
		__func__);
	return -EIO;
  }

But RPCSEC_GSS breaks this assumption. Because krb5i and krb5p
perform data transformation on RPC messages before they are
transmitted, direct data placement techniques cannot be used, thus
RPC messages must be sent via a Long call in both directions.
All such calls are sent with a Position Zero Read chunk, and all
such replies are handled with a Reply chunk. Thus the client must
provide every Call/Reply pair with both a Read list and a Reply
chunk.

Without any special security in effect, NFSv4 WRITEs may now also
use the Read list and provide a Reply chunk. The marshal_req
logic was preventing that, meaning an NFSv4 WRITE with a large
payload that included a GETATTR result larger than the inline
threshold would fail.
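
For illustration only, here is a minimal userspace sketch (not part
of this patch, and not kernel code) of the chunk section of a Call
header that carries one Position Zero Read chunk plus a Reply chunk,
following the HLOO encoding keys documented in the new helpers below.
The helper name encode_segment and the handle/length/offset values
are made up for the example.

  /* Sketch: lay out Read list + Reply chunk as XDR words.
   * 1 = item present, 0 = list terminator / item absent.
   */
  #include <stdint.h>
  #include <stdio.h>
  #include <arpa/inet.h>          /* htonl() */

  static uint32_t *encode_segment(uint32_t *p, uint32_t handle,
                                  uint32_t length, uint64_t offset)
  {
          *p++ = htonl(handle);                           /* H */
          *p++ = htonl(length);                           /* L */
          *p++ = htonl((uint32_t)(offset >> 32));         /* OO: big-endian */
          *p++ = htonl((uint32_t)offset);                 /*     64-bit offset */
          return p;
  }

  int main(void)
  {
          uint32_t hdr[32], *p = hdr;

          /* Read list: one Position Zero read chunk, then terminator */
          *p++ = htonl(1);                /* item present */
          *p++ = htonl(0);                /* position zero */
          p = encode_segment(p, 0x1234, 8192, 0);
          *p++ = 0;                       /* no more read chunks */

          *p++ = 0;                       /* Write list not present */

          /* Reply chunk: a counted array of one segment */
          *p++ = htonl(1);                /* item present */
          *p++ = htonl(1);                /* segment count */
          p = encode_segment(p, 0x5678, 4096, 0);

          printf("chunk section is %ld XDR words\n", (long)(p - hdr));
          return 0;
  }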

The code that encodes each chunk list is now completely contained in
its own function. There is some code duplication, but the trade-off
is that the overall logic should be more clear.
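
For reference, the new marshaling path reduces to the following
sequence (condensed from the rpcrdma_marshal_req hunk below, with the
error handling elided):

  iptr = headerp->rm_body.rm_chunks;
  iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
  iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
  iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
  hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;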

Note that all three chunk lists now share the rl_segments array.
Some additional per-req accounting is necessary to track this
usage. For the same reasons that the above simplifying assumption
has held true for so long, I don't expect more array elements are
needed at this time.
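
Concretely (condensed from the hunks below), the cursor into the
shared array is reset once per Call, and each encoder picks up where
the previous one left off:

  req->rl_nchunks = 0;                    /* chunks registered so far */
  req->rl_nextseg = req->rl_segments;     /* next unused array element */

  /* Each encoder starts at req->rl_nextseg, may convert at most
   * RPCRDMA_MAX_SEGS - req->rl_nchunks more segments, and stores
   * the advanced pointer back in req->rl_nextseg when it is done.
   */
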
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 88b18a12
@@ -62,17 +62,17 @@ enum rpcrdma_chunktype {
 };
 
 static const char transfertypes[][12] = {
-	"pure inline",	/* no chunks */
-	" read chunk",	/* some argument via rdma read */
-	"*read chunk",	/* entire request via rdma read */
-	"write chunk",	/* some result via rdma write */
+	"inline",	/* no chunks */
+	"read list",	/* some argument via rdma read */
+	"*read list",	/* entire request via rdma read */
+	"write list",	/* some result via rdma write */
 	"reply chunk"	/* entire reply via rdma write */
 };
 
 /* Returns size of largest RPC-over-RDMA header in a Call message
  *
- * The client marshals only one chunk list per Call message.
- * The largest list is the Read list.
+ * The largest Call header contains a full-size Read list and a
+ * minimal Reply chunk.
  */
 static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
 {
@@ -85,6 +85,11 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
 	maxsegs += 2;	/* segment for head and tail buffers */
 	size = maxsegs * sizeof(struct rpcrdma_read_chunk);
 
+	/* Minimal Read chunk size */
+	size += sizeof(__be32);	/* segment count */
+	size += sizeof(struct rpcrdma_segment);
+	size += sizeof(__be32);	/* list discriminator */
+
 	dprintk("RPC:       %s: max call header size = %u\n",
 		__func__, size);
 	return size;
@@ -431,6 +436,209 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 	return n;
 }
 
+static inline __be32 *
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr_seg *seg)
+{
+	*iptr++ = cpu_to_be32(seg->mr_rkey);
+	*iptr++ = cpu_to_be32(seg->mr_len);
+	return xdr_encode_hyper(iptr, seg->mr_base);
+}
+
+/* XDR-encode the Read list. Supports encoding a list of read
+ * segments that belong to a single read chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
+ *  Read chunklist (a linked list):
+ *   N elements, position P (same P for all chunks of same arg!):
+ *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
+ *
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Read list, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
+			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
+			 __be32 *iptr, enum rpcrdma_chunktype rtype)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+	unsigned int pos;
+	int n, nsegs;
+
+	if (rtype == rpcrdma_noch) {
+		*iptr++ = xdr_zero;	/* item not present */
+		return iptr;
+	}
+
+	pos = rqst->rq_snd_buf.head[0].iov_len;
+	if (rtype == rpcrdma_areadch)
+		pos = 0;
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg,
+				     RPCRDMA_MAX_SEGS - req->rl_nchunks);
+	if (nsegs < 0)
+		return ERR_PTR(nsegs);
+
+	do {
+		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, false);
+		if (n <= 0)
+			return ERR_PTR(n);
+
+		*iptr++ = xdr_one;	/* item present */
+
+		/* All read segments in this chunk
+		 * have the same "position".
+		 */
+		*iptr++ = cpu_to_be32(pos);
+		iptr = xdr_encode_rdma_segment(iptr, seg);
+
+		dprintk("RPC: %5u %s: read segment pos %u "
+			"%d@0x%016llx:0x%08x (%s)\n",
+			rqst->rq_task->tk_pid, __func__, pos,
+			seg->mr_len, (unsigned long long)seg->mr_base,
+			seg->mr_rkey, n < nsegs ? "more" : "last");
+
+		r_xprt->rx_stats.read_chunk_count++;
+		req->rl_nchunks++;
+		seg += n;
+		nsegs -= n;
+	} while (nsegs);
+	req->rl_nextseg = seg;
+
+	/* Finish Read list */
+	*iptr++ = xdr_zero;	/* Next item not present */
+	return iptr;
+}
+
+/* XDR-encode the Write list. Supports encoding a list containing
+ * one array of plain segments that belong to a single write chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
+ *  Write chunklist (a list of (one) counted array):
+ *   N elements:
+ *    1 - N - HLOO - HLOO - ... - HLOO - 0
+ *
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Write list, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+			  struct rpc_rqst *rqst, __be32 *iptr,
+			  enum rpcrdma_chunktype wtype)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+	int n, nsegs, nchunks;
+	__be32 *segcount;
+
+	if (wtype != rpcrdma_writech) {
+		*iptr++ = xdr_zero;	/* no Write list present */
+		return iptr;
+	}
+
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
+				     rqst->rq_rcv_buf.head[0].iov_len,
+				     wtype, seg,
+				     RPCRDMA_MAX_SEGS - req->rl_nchunks);
+	if (nsegs < 0)
+		return ERR_PTR(nsegs);
+
+	*iptr++ = xdr_one;	/* Write list present */
+	segcount = iptr++;	/* save location of segment count */
+
+	nchunks = 0;
+	do {
+		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
+		if (n <= 0)
+			return ERR_PTR(n);
+
+		iptr = xdr_encode_rdma_segment(iptr, seg);
+
+		dprintk("RPC: %5u %s: write segment "
"%d@0x016%llx:0x%08x (%s)\n",
+			rqst->rq_task->tk_pid, __func__,
+			seg->mr_len, (unsigned long long)seg->mr_base,
+			seg->mr_rkey, n < nsegs ? "more" : "last");
+
+		r_xprt->rx_stats.write_chunk_count++;
+		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+		req->rl_nchunks++;
+		nchunks++;
+		seg += n;
+		nsegs -= n;
+	} while (nsegs);
+	req->rl_nextseg = seg;
+
+	/* Update count of segments in this Write chunk */
+	*segcount = cpu_to_be32(nchunks);
+
+	/* Finish Write list */
+	*iptr++ = xdr_zero;	/* Next item not present */
+	return iptr;
+}
+
+/* XDR-encode the Reply chunk. Supports encoding an array of plain
+ * segments that belong to a single write (reply) chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
+ *  Reply chunk (a counted array):
+ *   N elements:
+ *    1 - N - HLOO - HLOO - ... - HLOO
+ *
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Reply chunk, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
+			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
+			   __be32 *iptr, enum rpcrdma_chunktype wtype)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+	int n, nsegs, nchunks;
+	__be32 *segcount;
+
+	if (wtype != rpcrdma_replych) {
+		*iptr++ = xdr_zero;	/* no Reply chunk present */
+		return iptr;
+	}
+
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
+				     RPCRDMA_MAX_SEGS - req->rl_nchunks);
+	if (nsegs < 0)
+		return ERR_PTR(nsegs);
+
+	*iptr++ = xdr_one;	/* Reply chunk present */
+	segcount = iptr++;	/* save location of segment count */
+
+	nchunks = 0;
+	do {
+		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
+		if (n <= 0)
+			return ERR_PTR(n);
+
+		iptr = xdr_encode_rdma_segment(iptr, seg);
+
+		dprintk("RPC: %5u %s: reply segment "
+			"%d@0x%016llx:0x%08x (%s)\n",
+			rqst->rq_task->tk_pid, __func__,
+			seg->mr_len, (unsigned long long)seg->mr_base,
+			seg->mr_rkey, n < nsegs ? "more" : "last");
+
+		r_xprt->rx_stats.reply_chunk_count++;
+		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+		req->rl_nchunks++;
+		nchunks++;
+		seg += n;
+		nsegs -= n;
+	} while (nsegs);
+	req->rl_nextseg = seg;
+
+	/* Update count of segments in the Reply chunk */
+	*segcount = cpu_to_be32(nchunks);
+
+	return iptr;
+}
+
 /*
  * Copy write data inline.
  * This function is used for "small" requests. Data which is passed
@@ -508,24 +716,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	struct rpc_xprt *xprt = rqst->rq_xprt;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	char *base;
-	size_t rpclen;
-	ssize_t hdrlen;
 	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
+	unsigned int pos;
+	ssize_t hdrlen;
+	size_t rpclen;
+	__be32 *iptr;
 
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
 		return rpcrdma_bc_marshal_reply(rqst);
 #endif
 
-	/*
-	 * rpclen gets amount of data in first buffer, which is the
-	 * pre-registered buffer.
-	 */
-	base = rqst->rq_svec[0].iov_base;
-	rpclen = rqst->rq_svec[0].iov_len;
-
 	headerp = rdmab_to_msg(req->rl_rdmabuf);
 	/* don't byte-swap XID, it's already done in request */
 	headerp->rm_xid = rqst->rq_xid;
@@ -565,8 +767,12 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 */
 	if (rpcrdma_args_inline(r_xprt, rqst)) {
 		rtype = rpcrdma_noch;
+		rpcrdma_inline_pullup(rqst);
+		rpclen = rqst->rq_svec[0].iov_len;
 	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
 		rtype = rpcrdma_readch;
+		rpclen = rqst->rq_svec[0].iov_len;
+		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
 	} else {
 		r_xprt->rx_stats.nomsg_call_count++;
 		headerp->rm_type = htonl(RDMA_NOMSG);
@@ -574,52 +780,49 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 		rpclen = 0;
 	}
-	/* The following simplification is not true forever */
-	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
-		wtype = rpcrdma_noch;
-	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
-		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
-			__func__);
-		return -EIO;
-	}
-
-	hdrlen = RPCRDMA_HDRLEN_MIN;
-
-	/*
-	 * Pull up any extra send data into the preregistered buffer.
-	 * When padding is in use and applies to the transfer, insert
-	 * it and change the message type.
-	 */
-	if (rtype == rpcrdma_noch) {
-
-		rpcrdma_inline_pullup(rqst);
-
-		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
-		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
-		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
-		/* new length after pullup */
-		rpclen = rqst->rq_svec[0].iov_len;
-	} else if (rtype == rpcrdma_readch)
-		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
-	if (rtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
-					       headerp, rtype);
-		wtype = rtype;	/* simplify dprintk */
-
-	} else if (wtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
-					       headerp, wtype);
-	}
-	if (hdrlen < 0)
-		return hdrlen;
+	/* This implementation supports the following combinations
+	 * of chunk lists in one RPC-over-RDMA Call message:
+	 *
+	 *   - Read list
+	 *   - Write list
+	 *   - Reply chunk
+	 *   - Read list + Reply chunk
+	 *
+	 * It might not yet support the following combinations:
+	 *
+	 *   - Read list + Write list
+	 *
+	 * It does not support the following combinations:
+	 *
+	 *   - Write list + Reply chunk
+	 *   - Read list + Write list + Reply chunk
+	 *
+	 * This implementation supports only a single chunk in each
+	 * Read or Write list. Thus for example the client cannot
+	 * send a Call message with a Position Zero Read chunk and a
+	 * regular Read chunk at the same time.
+	 */
+	req->rl_nchunks = 0;
+	req->rl_nextseg = req->rl_segments;
+	iptr = headerp->rm_body.rm_chunks;
+	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
+	if (IS_ERR(iptr))
+		goto out_unmap;
+	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
+	if (IS_ERR(iptr))
+		goto out_unmap;
+	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
+	if (IS_ERR(iptr))
+		goto out_unmap;
+	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
 
 	if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
 		goto out_overflow;
 
-	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
-		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[wtype], hdrlen, rpclen,
-		headerp, base, rdmab_lkey(req->rl_rdmabuf));
+	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
+		rqst->rq_task->tk_pid, __func__,
+		transfertypes[rtype], transfertypes[wtype],
+		hdrlen, rpclen);
 
 	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
 	req->rl_send_iov[0].length = hdrlen;
@@ -637,12 +840,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	return 0;
 
 out_overflow:
-	pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s\n",
-	       hdrlen, rpclen, transfertypes[wtype]);
+	pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n",
+	       hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]);
 	/* Terminate this RPC. Chunks registered above will be
 	 * released by xprt_release -> xprt_rmda_free .
 	 */
 	return -EIO;
+
+out_unmap:
+	for (pos = 0; req->rl_nchunks--;)
+		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
+						      &req->rl_segments[pos]);
+	return PTR_ERR(iptr);
 }
 
 /*
@@ -184,7 +184,9 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
  */
 #define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)
-#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
+
+/* data segments + head/tail for Call + head/tail for Reply */
+#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 4)
 
 struct rpcrdma_buffer;
 
@@ -298,6 +300,7 @@ struct rpcrdma_req {
 	struct rpcrdma_regbuf *rl_rdmabuf;
 	struct rpcrdma_regbuf *rl_sendbuf;
 	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
+	struct rpcrdma_mr_seg *rl_nextseg;
 
 	struct ib_cqe rl_cqe;
 	struct list_head rl_all;