Commit c6f5b47f authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Shrink send SGEs array

We no longer need to accommodate an xdr_buf whose pages start at an
offset and cross extra page boundaries. If there are more partial or
whole pages to send than there are available SGEs, the marshaling
logic is now smart enough to use a Read chunk instead of failing.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 16f906d6
net/sunrpc/xprtrdma/xprt_rdma.h

@@ -305,16 +305,19 @@ struct rpcrdma_mr_seg {		/* chunk descriptors */
 	char		*mr_offset;	/* kva if no page, else offset */
 };
 
-/* Reserve enough Send SGEs to send a maximum size inline request:
+/* The Send SGE array is provisioned to send a maximum size
+ * inline request:
  * - RPC-over-RDMA header
  * - xdr_buf head iovec
- * - RPCRDMA_MAX_INLINE bytes, possibly unaligned, in pages
+ * - RPCRDMA_MAX_INLINE bytes, in pages
  * - xdr_buf tail iovec
+ *
+ * The actual number of array elements consumed by each RPC
+ * depends on the device's max_sge limit.
  */
 enum {
 	RPCRDMA_MIN_SEND_SGES = 3,
-	RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1,
-	RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1,
+	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
 	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
 };
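As a rough illustration of what the change saves, the following standalone sketch works through the old and new SGE accounting side by side. The PAGE_SHIFT and RPCRDMA_MAX_INLINE values below are assumed example values (4 KiB pages, a 4 KiB inline threshold) chosen only to make the arithmetic concrete; the real values come from the kernel configuration and xprt_rdma.h.

/* Sketch of the old vs. new Send SGE accounting, with
 * assumed example values (4 KiB pages, 4 KiB inline
 * threshold). Not kernel code; compile with any C compiler.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define RPCRDMA_MAX_INLINE	4096UL

int main(void)
{
	/* Old: the page list could start at an offset and thus
	 * cross one extra page boundary, so an extra SGE was
	 * reserved for it.
	 */
	unsigned long old_send_pages = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1;
	unsigned long old_page_sges = (old_send_pages >> PAGE_SHIFT) + 1;
	/* header + head iovec + page SGEs + tail iovec */
	unsigned long old_send_sges = 1 + 1 + old_page_sges + 1;

	/* New: only whole pages of inline payload need SGEs,
	 * since oversized or misaligned payloads are sent via a
	 * Read chunk instead.
	 */
	unsigned long new_page_sges = RPCRDMA_MAX_INLINE >> PAGE_SHIFT;
	unsigned long new_send_sges = 1 + 1 + new_page_sges + 1;

	printf("old: %lu page SGEs, %lu send SGEs\n",
	       old_page_sges, old_send_sges);	/* old: 2, 5 */
	printf("new: %lu page SGEs, %lu send SGEs\n",
	       new_page_sges, new_send_sges);	/* new: 1, 4 */
	return 0;
}

With these assumed values, the array shrinks from 5 to 4 SGEs per send; the three fixed elements (header, head iovec, tail iovec) match RPCRDMA_MIN_SEND_SGES in the diff above.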