Commit cc22ef66 authored by Trond Myklebust, committed by Luis Henriques

SUNRPC: Fix locking around callback channel reply receive

commit 093a1468 upstream.

Both xprt_lookup_rqst() and xprt_complete_rqst() require that you
take the transport lock in order to avoid races with xprt_transmit().
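
For reference, here is a minimal sketch (not part of the patch) of the locking discipline this fix enforces: the caller holds xprt->transport_lock across both the lookup and the completion, so a concurrent xprt_transmit() cannot race with either step. The helper complete_one_reply() and its arguments are illustrative only; xprt_lookup_rqst(), xprt_complete_rqst() and transport_lock are the real kernel symbols.

#include <linux/spinlock.h>
#include <linux/sunrpc/xprt.h>

/*
 * Hypothetical helper (not in the patch) showing the required pattern:
 * take xprt->transport_lock around lookup and completion.  The _bh
 * variant is used because the transport lock is also taken from
 * bottom-half context.
 */
static int complete_one_reply(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *req;
	int ret = -EAGAIN;

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);	/* requires transport_lock */
	if (req) {
		/* ... copy the reply data into req->rq_private_buf ... */
		xprt_complete_rqst(req->rq_task, copied);	/* ditto */
		ret = 0;
	}
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}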
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Reviewed-by: Jeff Layton <jlayton@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
[ luis: backported to 3.16: adjusted context ]
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 72349522
@@ -1000,17 +1000,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	xid = *p++;
 	calldir = *p;
 
-	if (bc_xprt)
-		req = xprt_lookup_rqst(bc_xprt, xid);
-
-	if (!req) {
-		printk(KERN_NOTICE
-			"%s: Got unrecognized reply: "
-			"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
-			__func__, ntohl(calldir),
-			bc_xprt, xid);
+	if (!bc_xprt)
 		return -EAGAIN;
-	}
+	spin_lock_bh(&bc_xprt->transport_lock);
+	req = xprt_lookup_rqst(bc_xprt, xid);
+	if (!req)
+		goto unlock_notfound;
 
 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
 	/*
@@ -1021,11 +1016,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	dst = &req->rq_private_buf.head[0];
 	src = &rqstp->rq_arg.head[0];
 	if (dst->iov_len < src->iov_len)
-		return -EAGAIN; /* whatever; just giving up. */
+		goto unlock_eagain; /* whatever; just giving up. */
 	memcpy(dst->iov_base, src->iov_base, src->iov_len);
 	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
 	rqstp->rq_arg.len = 0;
+	spin_unlock_bh(&bc_xprt->transport_lock);
 	return 0;
+unlock_notfound:
+	printk(KERN_NOTICE
+		"%s: Got unrecognized reply: "
+		"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
+		__func__, ntohl(calldir),
+		bc_xprt, ntohl(xid));
+unlock_eagain:
+	spin_unlock_bh(&bc_xprt->transport_lock);
+	return -EAGAIN;
 }
 
 static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)