Commit 264b0cdb authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Replace rpcrdma_count_chunks()

Clean up chunk list decoding by using the xdr_stream set up in
rpcrdma_reply_handler. This hardens decoding by checking for buffer
overflow at every step while unmarshaling variable-length XDR
objects.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 07ff2dd5
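For readers unfamiliar with the xdr_stream decoding style this patch adopts: the hardening comes from xdr_inline_decode(), which hands back a pointer only when the requested number of bytes is still inside the received buffer and returns NULL otherwise, so each decoder step can bail out with -EIO instead of walking past the end of the message. Below is a minimal, self-contained user-space sketch of that pattern. It is not the kernel's struct xdr_stream API; the cursor type and helper names here are invented purely for illustration.

/*
 * Sketch of a bounds-checked decode cursor (illustrative only, not the
 * SUNRPC xdr_stream implementation).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

struct xdr_cursor {
	const uint8_t *p;	/* next unread byte */
	const uint8_t *end;	/* one past the last valid byte */
};

/* Return a pointer to nbytes of message data, or NULL on overflow. */
static const uint8_t *cursor_inline_decode(struct xdr_cursor *c, size_t nbytes)
{
	if ((size_t)(c->end - c->p) < nbytes)
		return NULL;	/* would run past the receive buffer */
	const uint8_t *ret = c->p;
	c->p += nbytes;
	return ret;
}

/* Decode one XDR unsigned 32-bit integer, failing on a short buffer. */
static int decode_u32(struct xdr_cursor *c, uint32_t *out)
{
	const uint8_t *p = cursor_inline_decode(c, 4);

	if (!p)
		return -1;	/* the kernel decoders return -EIO here */
	uint32_t be;
	memcpy(&be, p, 4);
	*out = ntohl(be);
	return 0;
}

int main(void)
{
	/* Two XDR words followed by a deliberately truncated third one. */
	uint8_t msg[] = { 0, 0, 0, 1, 0, 0, 0, 42, 0, 0 };
	struct xdr_cursor c = { msg, msg + sizeof(msg) };
	uint32_t v;

	while (decode_u32(&c, &v) == 0)
		printf("decoded %u\n", v);
	printf("stopped at the truncated word without overrunning the buffer\n");
	return 0;
}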
...
@@ -792,48 +792,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	return PTR_ERR(iptr);
 }
 
-/*
- * Chase down a received write or reply chunklist to get length
- * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
- */
-static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
-{
-	unsigned int i, total_len;
-	struct rpcrdma_write_chunk *cur_wchunk;
-	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
-
-	i = be32_to_cpu(**iptrp);
-	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
-	total_len = 0;
-	while (i--) {
-		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
-
-		ifdebug(FACILITY) {
-			u64 off;
-
-			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
-			dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
-				__func__,
-				be32_to_cpu(seg->rs_length),
-				(unsigned long long)off,
-				be32_to_cpu(seg->rs_handle));
-		}
-		total_len += be32_to_cpu(seg->rs_length);
-		++cur_wchunk;
-	}
-	/* check and adjust for properly terminated write chunk */
-	if (wrchunk) {
-		__be32 *w = (__be32 *) cur_wchunk;
-
-		if (*w++ != xdr_zero)
-			return -1;
-		cur_wchunk = (struct rpcrdma_write_chunk *) w;
-	}
-	if ((char *)cur_wchunk > base + rep->rr_len)
-		return -1;
-	*iptrp = (__be32 *) cur_wchunk;
-	return total_len;
-}
-
 /**
  * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
  * @rqst: controlling RPC request
...
@@ -1004,89 +962,164 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
 }
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
-static int
-rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
-		   struct rpc_rqst *rqst)
-{
-	struct xdr_stream *xdr = &rep->rr_stream;
-	int rdmalen, status;
-	__be32 *p;
-
-	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
-	if (unlikely(!p))
-		return -EIO;
-
-	/* never expect read list */
-	if (unlikely(*p++ != xdr_zero))
-		return -EIO;
-
-	/* Write list */
-	if (*p != xdr_zero) {
-		char *base = rep->rr_hdrbuf.head[0].iov_base;
-
-		p++;
-		rdmalen = rpcrdma_count_chunks(rep, 1, &p);
-		if (rdmalen < 0 || *p++ != xdr_zero)
-			return -EIO;
-
-		rep->rr_len -= (char *)p - base;
-		status = rep->rr_len + rdmalen;
-		r_xprt->rx_stats.total_rdma_reply += rdmalen;
-
-		/* special case - last segment may omit padding */
-		rdmalen &= 3;
-		if (rdmalen) {
-			rdmalen = 4 - rdmalen;
-			status += rdmalen;
-		}
-	} else {
-		p = xdr_inline_decode(xdr, sizeof(*p));
-		if (!p)
-			return -EIO;
-
-		/* never expect reply chunk */
-		if (*p++ != xdr_zero)
-			return -EIO;
-		rdmalen = 0;
-		rep->rr_len -= RPCRDMA_HDRLEN_MIN;
-		status = rep->rr_len;
-	}
-
-	r_xprt->rx_stats.fixup_copy_count +=
-		rpcrdma_inline_fixup(rqst, (char *)p, rep->rr_len,
-				     rdmalen);
-
-	return status;
-}
-
-static noinline int
-rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
-{
-	struct xdr_stream *xdr = &rep->rr_stream;
-	int rdmalen;
-	__be32 *p;
-
-	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
-	if (unlikely(!p))
-		return -EIO;
-
-	/* never expect Read chunks */
-	if (unlikely(*p++ != xdr_zero))
-		return -EIO;
-	/* never expect Write chunks */
-	if (unlikely(*p++ != xdr_zero))
-		return -EIO;
-	/* always expect a Reply chunk */
-	if (unlikely(*p++ == xdr_zero))
-		return -EIO;
-
-	rdmalen = rpcrdma_count_chunks(rep, 0, &p);
-	if (rdmalen < 0)
-		return -EIO;
-	r_xprt->rx_stats.total_rdma_reply += rdmalen;
-
-	/* Reply chunk buffer already is the reply vector - no fixup. */
-	return rdmalen;
-}
+static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	ifdebug(FACILITY) {
+		u64 offset;
+		u32 handle;
+
+		handle = be32_to_cpup(p++);
+		*length = be32_to_cpup(p++);
+		xdr_decode_hyper(p, &offset);
+		dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
+			__func__, *length, (unsigned long long)offset,
+			handle);
+	} else {
+		*length = be32_to_cpup(p + 1);
+	}
+
+	return 0;
+}
+
+static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
+{
+	u32 segcount, seglength;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	*length = 0;
+	segcount = be32_to_cpup(p);
+	while (segcount--) {
+		if (decode_rdma_segment(xdr, &seglength))
+			return -EIO;
+		*length += seglength;
+	}
+
+	dprintk("RPC: %s: segcount=%u, %u bytes\n",
+		__func__, be32_to_cpup(p), *length);
+	return 0;
+}
+
+/* In RPC-over-RDMA Version One replies, a Read list is never
+ * expected. This decoder is a stub that returns an error if
+ * a Read list is present.
+ */
+static int decode_read_list(struct xdr_stream *xdr)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+	if (unlikely(*p != xdr_zero))
+		return -EIO;
+	return 0;
+}
+
+/* Supports only one Write chunk in the Write list
+ */
+static int decode_write_list(struct xdr_stream *xdr, u32 *length)
+{
+	u32 chunklen;
+	bool first;
+	__be32 *p;
+
+	*length = 0;
+	first = true;
+	do {
+		p = xdr_inline_decode(xdr, sizeof(*p));
+		if (unlikely(!p))
+			return -EIO;
+		if (*p == xdr_zero)
+			break;
+		if (!first)
+			return -EIO;
+
+		if (decode_write_chunk(xdr, &chunklen))
+			return -EIO;
+		*length += chunklen;
+		first = false;
+	} while (true);
+	return 0;
+}
+
+static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	*length = 0;
+	if (*p != xdr_zero)
+		if (decode_write_chunk(xdr, length))
+			return -EIO;
+	return 0;
+}
+
+static int
+rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
+		   struct rpc_rqst *rqst)
+{
+	struct xdr_stream *xdr = &rep->rr_stream;
+	u32 writelist, replychunk, rpclen;
+	char *base;
+
+	/* Decode the chunk lists */
+	if (decode_read_list(xdr))
+		return -EIO;
+	if (decode_write_list(xdr, &writelist))
+		return -EIO;
+	if (decode_reply_chunk(xdr, &replychunk))
+		return -EIO;
+
+	/* RDMA_MSG sanity checks */
+	if (unlikely(replychunk))
+		return -EIO;
+
+	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
+	base = (char *)xdr_inline_decode(xdr, 0);
+	rpclen = xdr_stream_remaining(xdr);
+	r_xprt->rx_stats.fixup_copy_count +=
+		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
+
+	r_xprt->rx_stats.total_rdma_reply += writelist;
+	return rpclen + xdr_align_size(writelist);
+}
+
+static noinline int
+rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
+{
+	struct xdr_stream *xdr = &rep->rr_stream;
+	u32 writelist, replychunk;
+
+	/* Decode the chunk lists */
+	if (decode_read_list(xdr))
+		return -EIO;
+	if (decode_write_list(xdr, &writelist))
+		return -EIO;
+	if (decode_reply_chunk(xdr, &replychunk))
+		return -EIO;
+
+	/* RDMA_NOMSG sanity checks */
+	if (unlikely(writelist))
+		return -EIO;
+	if (unlikely(!replychunk))
+		return -EIO;
+
+	/* Reply chunk buffer already is the reply vector */
+	r_xprt->rx_stats.total_rdma_reply += replychunk;
+	return replychunk;
+}
 
 static noinline int
...