Commit 28d9d56f authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Remove imul instructions from rpcrdma_convert_iovs()

Re-arrange the pointer arithmetic in rpcrdma_convert_iovs() to
eliminate several integer multiplication instructions during
Transport Header encoding.

Also, array overflow does not occur outside development
environments, so replace overflow checking with one spot check
at the end. This reduces the number of conditional branches in
the common case.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 7ec910e7
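
Why the rework removes multiply instructions: an indexed access such as seg[n].mr_len makes the compiler re-derive the element address as seg + n * sizeof(struct rpcrdma_mr_seg), and when the struct size is not a power of two that scaling compiles to an integer multiply (imul on x86). Bumping a pointer instead advances the address with one add per iteration. Below is a minimal standalone sketch of the two forms; struct item, fill_indexed, and fill_bumped are invented names for illustration, not the kernel's types or functions:

#include <stddef.h>

#define CHUNK	4096	/* stand-in for PAGE_SIZE */

struct item {
	void		*page;
	char		*offset;
	unsigned int	 len;
};

/* Indexed form: each item[n].field access re-derives the element
 * address as item + n * sizeof(struct item); a non-power-of-two
 * struct size turns that scaling into an integer multiply.
 */
unsigned int fill_indexed(struct item *item, char *base, size_t total)
{
	unsigned int n = 0;

	while (total) {
		size_t len = total < CHUNK ? total : CHUNK;

		item[n].page = NULL;
		item[n].offset = base;
		item[n].len = len;
		total -= len;
		base += len;
		++n;
	}
	return n;
}

/* Pointer-bump form: the element address stays in a register and
 * advances by a single add per iteration; the count is returned
 * through *n, as the reworked rpcrdma_convert_kvec() does.
 */
struct item *fill_bumped(struct item *item, char *base, size_t total,
			 unsigned int *n)
{
	while (total) {
		size_t len = total < CHUNK ? total : CHUNK;

		item->page = NULL;
		item->offset = base;
		item->len = len;
		total -= len;
		base += len;
		++item;
		++(*n);
	}
	return item;
}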
@@ -169,40 +169,41 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
 }
 
-/* Split "vec" on page boundaries into segments. FMR registers pages,
- * not a byte range. Other modes coalesce these segments into a single
- * MR when they can.
+/* Split @vec on page boundaries into SGEs. FMR registers pages, not
+ * a byte range. Other modes coalesce these SGEs into a single MR
+ * when they can.
+ *
+ * Returns pointer to next available SGE, and bumps the total number
+ * of SGEs consumed.
  */
-static int
-rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
+static struct rpcrdma_mr_seg *
+rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
+		     unsigned int *n)
 {
-	size_t page_offset;
-	u32 remaining;
+	u32 remaining, page_offset;
 	char *base;
 
 	base = vec->iov_base;
 	page_offset = offset_in_page(base);
 	remaining = vec->iov_len;
-	while (remaining && n < RPCRDMA_MAX_SEGS) {
-		seg[n].mr_page = NULL;
-		seg[n].mr_offset = base;
-		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
-		remaining -= seg[n].mr_len;
-		base += seg[n].mr_len;
-		++n;
+	while (remaining) {
+		seg->mr_page = NULL;
+		seg->mr_offset = base;
+		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
+		remaining -= seg->mr_len;
+		base += seg->mr_len;
+		++seg;
+		++(*n);
 		page_offset = 0;
 	}
-	return n;
+	return seg;
 }
 
-/*
- * Chunk assembly from upper layer xdr_buf.
- *
- * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
- * elements. Segments are then coalesced when registered, if possible
- * within the selected memreg mode.
+/* Convert @xdrbuf into SGEs no larger than a page each. As they
+ * are registered, these SGEs are then coalesced into RDMA segments
+ * when the selected memreg mode supports it.
  *
- * Returns positive number of segments converted, or a negative errno.
+ * Returns positive number of SGEs consumed, or a negative errno.
  */
 
 static int
@@ -210,47 +211,41 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
 	unsigned int pos, enum rpcrdma_chunktype type,
 	struct rpcrdma_mr_seg *seg)
 {
-	int len, n, p, page_base;
+	unsigned long page_base;
+	unsigned int len, n;
 	struct page **ppages;
 
 	n = 0;
-	if (pos == 0) {
-		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
-		if (n == RPCRDMA_MAX_SEGS)
-			goto out_overflow;
-	}
+	if (pos == 0)
+		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);
 
 	len = xdrbuf->page_len;
 	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
 	page_base = offset_in_page(xdrbuf->page_base);
-	p = 0;
-	while (len && n < RPCRDMA_MAX_SEGS) {
-		if (!ppages[p]) {
-			/* alloc the pagelist for receiving buffer */
-			ppages[p] = alloc_page(GFP_ATOMIC);
-			if (!ppages[p])
+	while (len) {
+		if (unlikely(!*ppages)) {
+			/* XXX: Certain upper layer operations do
+			 *	not provide receive buffer pages.
+			 */
+			*ppages = alloc_page(GFP_ATOMIC);
+			if (!*ppages)
 				return -EAGAIN;
 		}
-		seg[n].mr_page = ppages[p];
-		seg[n].mr_offset = (void *)(unsigned long) page_base;
-		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
-		if (seg[n].mr_len > PAGE_SIZE)
-			goto out_overflow;
-		len -= seg[n].mr_len;
-		++n;
-		++p;
-		page_base = 0;	/* page offset only applies to first page */
+		seg->mr_page = *ppages;
+		seg->mr_offset = (char *)page_base;
+		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
+		len -= seg->mr_len;
+		++ppages;
+		++seg;
+		++n;
+		page_base = 0;
 	}
 
-	/* Message overflows the seg array */
-	if (len && n == RPCRDMA_MAX_SEGS)
-		goto out_overflow;
-
 	/* When encoding a Read chunk, the tail iovec contains an
 	 * XDR pad and may be omitted.
 	 */
 	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
-		return n;
+		goto out;
 
 	/* When encoding a Write chunk, some servers need to see an
 	 * extra segment for non-XDR-aligned Write chunks. The upper
@@ -258,19 +253,15 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
 	 * for this purpose.
 	 */
 	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
-		return n;
+		goto out;
 
-	if (xdrbuf->tail[0].iov_len) {
-		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
-		if (n == RPCRDMA_MAX_SEGS)
-			goto out_overflow;
-	}
+	if (xdrbuf->tail[0].iov_len)
+		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);
 
-	return n;
-
-out_overflow:
-	pr_err("rpcrdma: segment array overflow\n");
-	return -EIO;
+out:
+	if (unlikely(n > RPCRDMA_MAX_SEGS))
+		return -EIO;
+	return n;
 }
 
 static inline int
...
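
The second change, deferring the overflow check, can be seen in isolation in the sketch below; convert_checked, convert_spot_checked, and MAX_SEGS are invented names standing in for the kernel's loop and RPCRDMA_MAX_SEGS. The deferred form trades a conditional branch on every pass through the hot loop for one comparison at the end and, as the commit message notes, relies on overflow not occurring outside development environments:

#define MAX_SEGS 8	/* stand-in for RPCRDMA_MAX_SEGS */

/* Per-iteration checking: one extra conditional branch on every
 * pass through the hot loop.
 */
int convert_checked(unsigned int *lens, unsigned int nchunks)
{
	unsigned int n;

	for (n = 0; n < nchunks; n++) {
		if (n == MAX_SEGS)
			return -1;	/* overflow caught mid-loop */
		lens[n] = 4096;		/* ... fill segment n ... */
	}
	return n;
}

/* Deferred spot check: the loop body carries no bounds test; a
 * single comparison at the end reports the (rare) overflow. The
 * writes themselves are not stopped mid-loop, so the destination
 * array must be sized for the worst-case message.
 */
int convert_spot_checked(unsigned int *lens, unsigned int nchunks)
{
	unsigned int n;

	for (n = 0; n < nchunks; n++)
		lens[n] = 4096;		/* ... fill segment n ... */
	if (n > MAX_SEGS)
		return -1;
	return n;
}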