Commit fcbeced5 authored by Chuck Lever; committed by J. Bruce Fields

svcrdma: Move read list XDR round-up logic

This is a pre-requisite for a subsequent patch.

Read list XDR round-up needs to be done _before_ additional inline
content is copied to the end of the XDR buffer's page list. Move
the logic added by commit e560e3b5 ("svcrdma: Add zero padding
if the client doesn't send it").
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 0b056c22
...@@ -43,7 +43,6 @@ ...@@ -43,7 +43,6 @@
#include <linux/sunrpc/debug.h> #include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h> #include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/highmem.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h> #include <rdma/rdma_cm.h>
...@@ -434,6 +433,15 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt, ...@@ -434,6 +433,15 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
} }
} }
/* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
if (page_offset & 3) {
u32 pad = 4 - (page_offset & 3);
head->arg.page_len += pad;
head->arg.len += pad;
head->arg.buflen += pad;
}
ret = 1; ret = 1;
head->position = position; head->position = position;
...@@ -446,32 +454,6 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt, ...@@ -446,32 +454,6 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
return ret; return ret;
} }
/*
 * RFC 5666 section 3.7 permits a client to leave the XDR zero pad
 * off a chunk list rather than issue an extra RDMA READ for a few
 * zero bytes. Supply that padding here and grow the xdr_buf length
 * accounting to match.
 */
static void
rdma_fix_xdr_pad(struct xdr_buf *buf)
{
	unsigned int len = buf->page_len;
	unsigned int pad = (XDR_QUADLEN(len) << 2) - len;
	char *tail;

	if (!pad)
		return;

	/* Zero the pad bytes following the page-list data; pad is at
	 * most 3 bytes and cannot cross a page boundary because the
	 * end offset rounds up to a 4-byte multiple within the page. */
	tail = page_address(buf->pages[len >> PAGE_SHIFT]);
	memset(tail + (len & ~PAGE_MASK), 0, pad);

	buf->page_len += pad;
	buf->buflen += pad;
	buf->len += pad;
}
static int rdma_read_complete(struct svc_rqst *rqstp, static int rdma_read_complete(struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *head) struct svc_rdma_op_ctxt *head)
{ {
...@@ -499,7 +481,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp, ...@@ -499,7 +481,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
} }
/* Point rq_arg.pages past header */ /* Point rq_arg.pages past header */
rdma_fix_xdr_pad(&head->arg);
rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
rqstp->rq_arg.page_len = head->arg.page_len; rqstp->rq_arg.page_len = head->arg.page_len;
rqstp->rq_arg.page_base = head->arg.page_base; rqstp->rq_arg.page_base = head->arg.page_base;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment