Commit 91a10c52 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Use same device when mapping or syncing DMA buffers

When the underlying device driver is reloaded, ia->ri_device will be
replaced. All cached copies of that device pointer have to be
updated as well.

Commit 54cbd6b0 ("xprtrdma: Delay DMA mapping Send and Receive
buffers") added the rg_device field to each regbuf. As part of
handling a device removal, rpcrdma_dma_unmap_regbuf is invoked on
all regbufs for a transport.
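
For reference, the regbuf structure this patch leans on looks roughly like this (abridged from xprt_rdma.h; exact layout may differ by kernel version):

    struct rpcrdma_regbuf {
            struct ib_sge           rg_iov;        /* addr/length/lkey of the buffer */
            struct ib_device        *rg_device;    /* device that DMA-mapped rg_base */
            enum dma_data_direction rg_direction;
            __be32                  rg_base[0] __attribute__ ((aligned(256)));
    };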

Simply calling rpcrdma_dma_map_regbuf for each Receive buffer after
the driver has been reloaded should reinitialize rg_device correctly
for every case except rpcrdma_wc_receive, which still uses
rpcrdma_rep::rr_device.
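
In other words, __rpcrdma_dma_map_regbuf (see the diff below) records which device performed the mapping, so any later sync can be issued against that same device. Condensed:

    /* Condensed from __rpcrdma_dma_map_regbuf() in the diff below */
    rb->rg_iov.addr = ib_dma_map_single(device, (void *)rb->rg_base,
                                        rdmab_length(rb), rb->rg_direction);
    if (ib_dma_mapping_error(device, rdmab_addr(rb)))
            return false;
    rb->rg_device = device;        /* cache the device that did the mapping */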

Ensure the same device that was used to map a Receive buffer is also
used to sync it in rpcrdma_wc_receive by using rg_device there
instead of rr_device.

This is the only use of rr_device, so it can be removed.

The use of regbufs in the send path is also updated, for
completeness.
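
Condensed from the diff below, the receive completion path now derives the device from the regbuf itself via the new rdmab_device() helper:

    /* New helper in xprt_rdma.h: which device mapped this regbuf? */
    static inline struct ib_device *
    rdmab_device(struct rpcrdma_regbuf *rb)
    {
            return rb->rg_device;
    }

    /* rpcrdma_wc_receive(): sync with the mapping device, not the
     * per-rep rr_device cache (which this patch removes).
     */
    ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                               rdmab_addr(rep->rr_rdmabuf),
                               rep->rr_len, DMA_FROM_DEVICE);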

Fixes: 54cbd6b0 ("xprtrdma: Delay DMA mapping Send and ... ")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent fff09594
net/sunrpc/xprtrdma/rpc_rdma.c:

@@ -494,7 +494,7 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	}
 	sge->length = len;
 
-	ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
+	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
 				      sge->length, DMA_TO_DEVICE);
 	req->rl_send_wr.num_sge++;
 	return true;
@@ -523,7 +523,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	sge[sge_no].addr = rdmab_addr(rb);
 	sge[sge_no].length = xdr->head[0].iov_len;
 	sge[sge_no].lkey = rdmab_lkey(rb);
-	ib_dma_sync_single_for_device(device, sge[sge_no].addr,
+	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
 				      sge[sge_no].length, DMA_TO_DEVICE);
 
 	/* If there is a Read chunk, the page list is being handled

net/sunrpc/xprtrdma/verbs.c:

@@ -180,7 +180,7 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	rep->rr_wc_flags = wc->wc_flags;
 	rep->rr_inv_rkey = wc->ex.invalidate_rkey;
 
-	ib_dma_sync_single_for_cpu(rep->rr_device,
+	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
 				   rdmab_addr(rep->rr_rdmabuf),
 				   rep->rr_len, DMA_FROM_DEVICE);
@@ -878,7 +878,6 @@ struct rpcrdma_rep *
 rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_rep *rep;
 	int rc;
@@ -894,7 +893,6 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 		goto out_free;
 	}
 
-	rep->rr_device = ia->ri_device;
 	rep->rr_cqe.done = rpcrdma_wc_receive;
 	rep->rr_rxprt = r_xprt;
 	INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
@@ -1232,17 +1230,19 @@ rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
 bool
 __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 {
+	struct ib_device *device = ia->ri_device;
+
 	if (rb->rg_direction == DMA_NONE)
 		return false;
 
-	rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
+	rb->rg_iov.addr = ib_dma_map_single(device,
 					    (void *)rb->rg_base,
 					    rdmab_length(rb),
 					    rb->rg_direction);
-	if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
+	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
 		return false;
 
-	rb->rg_device = ia->ri_device;
+	rb->rg_device = device;
 	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
 	return true;
 }

net/sunrpc/xprtrdma/xprt_rdma.h:

@@ -164,6 +164,12 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
 	return (struct rpcrdma_msg *)rb->rg_base;
 }
 
+static inline struct ib_device *
+rdmab_device(struct rpcrdma_regbuf *rb)
+{
+	return rb->rg_device;
+}
+
 #define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
 
 /* To ensure a transport can always make forward progress,
@@ -209,7 +215,6 @@ struct rpcrdma_rep {
 	unsigned int		rr_len;
 	int			rr_wc_flags;
 	u32			rr_inv_rkey;
-	struct ib_device	*rr_device;
 	struct rpcrdma_xprt	*rr_rxprt;
 	struct work_struct	rr_work;
 	struct list_head	rr_list;