Commit 394b2c77 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Fix error handling in rpcrdma_prepare_msg_sges()

When this function fails, it needs to undo the DMA mappings it has done so far.
Otherwise those mappings are leaked.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent ad99f053
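
For context, the leak being fixed follows a common pattern: rpcrdma_prepare_msg_sges() DMA-maps Send SGEs one at a time, so a failure partway through must release the mappings already made before the function returns. The sketch below is illustrative only; it is not taken from the patch, and the function and parameter names are invented. It shows the map-then-unwind-on-error shape using the RDMA core's ib_dma_map_page()/ib_dma_unmap_page() helpers.

/* Minimal sketch of unwind-on-error; illustrative names, not from this patch. */
#include <rdma/ib_verbs.h>

static bool example_map_send_sges(struct ib_device *device, struct ib_sge *sge,
                                  struct page **pages, size_t *lengths,
                                  unsigned int nr)
{
        unsigned int i;

        for (i = 0; i < nr; i++) {
                sge[i].addr = ib_dma_map_page(device, pages[i], 0,
                                              lengths[i], DMA_TO_DEVICE);
                if (ib_dma_mapping_error(device, sge[i].addr))
                        goto out_unmap;
                sge[i].length = lengths[i];
        }
        return true;

out_unmap:
        /* Release only the mappings completed so far; without this step
         * they would be leaked, which is the bug this commit fixes.
         */
        while (i--)
                ib_dma_unmap_page(device, sge[i].addr, sge[i].length,
                                  DMA_TO_DEVICE);
        return false;
}
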
@@ -511,6 +511,28 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	return 0;
 }
 
+/**
+ * rpcrdma_unmap_sges - DMA-unmap Send buffers
+ * @ia: interface adapter (device)
+ * @req: req with possibly some SGEs to be DMA unmapped
+ *
+ */
+void
+rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+{
+	struct ib_sge *sge;
+	unsigned int count;
+
+	/* The first two SGEs contain the transport header and
+	 * the inline buffer. These are always left mapped so
+	 * they can be cheaply re-used.
+	 */
+	sge = &req->rl_send_sge[2];
+	for (count = req->rl_mapped_sges; count--; sge++)
+		ib_dma_unmap_page(ia->ri_device,
+				  sge->addr, sge->length, DMA_TO_DEVICE);
+}
+
 /* Prepare the RPC-over-RDMA header SGE.
  */
 static bool
@@ -641,10 +663,12 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	return true;
 
 out_mapping_overflow:
+	rpcrdma_unmap_sges(ia, req);
 	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
 	return false;
 
 out_mapping_err:
+	rpcrdma_unmap_sges(ia, req);
 	pr_err("rpcrdma: Send mapping error\n");
 	return false;
 }
@@ -671,20 +695,6 @@ rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	return false;
 }
 
-void
-rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
-{
-	struct ib_device *device = ia->ri_device;
-	struct ib_sge *sge;
-	int count;
-
-	sge = &req->rl_send_sge[2];
-	for (count = req->rl_mapped_sges; count--; sge++)
-		ib_dma_unmap_page(device, sge->addr, sge->length,
-				  DMA_TO_DEVICE);
-	req->rl_mapped_sges = 0;
-}
-
 /**
  * rpcrdma_marshal_req - Marshal and send one RPC request
  * @r_xprt: controlling transport
...