Commit 773f6c5b authored by Chuck Lever

svcrdma: Prevent a UAF in svc_rdma_send()

In some error flow cases, svc_rdma_wc_send() releases @ctxt. Copy
the sc_cid field in @ctxt to a stack variable in order to guarantee
that the value is available after the ib_post_send() call.

In case the new comment looks a little strange, this will be done
with at least one more field in a subsequent patch.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 5b9a8589
@@ -316,12 +316,17 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
  * @rdma: transport on which to post the WR
  * @ctxt: send ctxt with a Send WR ready to post
  *
+ * Copy fields in @ctxt to stack variables in order to guarantee
+ * that these values remain available after the ib_post_send() call.
+ * In some error flow cases, svc_rdma_wc_send() releases @ctxt.
+ *
  * Returns zero if the Send WR was posted successfully. Otherwise, a
  * negative errno is returned.
  */
 int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 {
 	struct ib_send_wr *wr = &ctxt->sc_send_wr;
+	struct rpc_rdma_cid cid = ctxt->sc_cid;
 	int ret;
 
 	might_sleep();
@@ -337,12 +342,12 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
 			svc_rdma_wake_send_waiters(rdma, 1);
 			percpu_counter_inc(&svcrdma_stat_sq_starve);
-			trace_svcrdma_sq_full(rdma, &ctxt->sc_cid);
+			trace_svcrdma_sq_full(rdma, &cid);
 			wait_event(rdma->sc_send_wait,
 				   atomic_read(&rdma->sc_sq_avail) > 0);
 			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
 				return -ENOTCONN;
-			trace_svcrdma_sq_retry(rdma, &ctxt->sc_cid);
+			trace_svcrdma_sq_retry(rdma, &cid);
 			continue;
 		}
@@ -353,7 +358,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 		return 0;
 	}
 
-	trace_svcrdma_sq_post_err(rdma, &ctxt->sc_cid, ret);
+	trace_svcrdma_sq_post_err(rdma, &cid, ret);
 	svc_xprt_deferred_close(&rdma->sc_xprt);
 	svc_rdma_wake_send_waiters(rdma, 1);
 	return ret;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment