Commit 5fe1043d authored by Christoph Hellwig, committed by Doug Ledford

svc_rdma: use local_dma_lkey

We now always have a per-PD local_dma_lkey available.  Make use of that
fact in svc_rdma and stop registering our own MR.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Acked-by: J. Bruce Fields <bfields@redhat.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5d252f90
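For context, this is the pattern the diff below applies everywhere: since
ib_alloc_pd() now populates a local_dma_lkey in every struct ib_pd, a consumer
can put that lkey straight into its scatter/gather entries instead of
registering an all-of-physical-memory MR with ib_get_dma_mr() and caching its
lkey. A minimal sketch, using a hypothetical post_one_recv() helper that is
not part of this patch:

/*
 * Illustration only (not from this patch): post a receive buffer using
 * the PD's local_dma_lkey rather than the lkey of a separately
 * registered DMA MR.
 */
#include <rdma/ib_verbs.h>

static int post_one_recv(struct ib_qp *qp, struct ib_pd *pd,
                         u64 dma_addr, u32 len)
{
        struct ib_sge sge = {
                .addr   = dma_addr,             /* already DMA-mapped */
                .length = len,
                .lkey   = pd->local_dma_lkey,   /* per-PD lkey, no MR needed */
        };
        struct ib_recv_wr wr = {
                .sg_list = &sge,
                .num_sge = 1,
        };
        struct ib_recv_wr *bad_wr;

        return ib_post_recv(qp, &wr, &bad_wr);
}

svc_rdma previously fell back to ib_get_dma_mr() when it believed the device
lacked a usable local DMA lkey; with the per-PD lkey now guaranteed, both that
fallback and the cached sc_dma_lkey/sc_phys_mr fields can be deleted, which is
exactly what the diff does.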
@@ -156,13 +156,11 @@ struct svcxprt_rdma {
 	struct ib_qp         *sc_qp;
 	struct ib_cq         *sc_rq_cq;
 	struct ib_cq         *sc_sq_cq;
-	struct ib_mr         *sc_phys_mr;	/* MR for server memory */
 	int		     (*sc_reader)(struct svcxprt_rdma *,
 					  struct svc_rqst *,
 					  struct svc_rdma_op_ctxt *,
 					  int *, u32 *, u32, u32, u64, bool);
 	u32		     sc_dev_caps;	/* distilled device caps */
-	u32		     sc_dma_lkey;	/* local dma key */
 	unsigned int	     sc_frmr_pg_list_len;
 	struct list_head     sc_frmr_q;
 	spinlock_t	     sc_frmr_q_lock;
...
@@ -128,7 +128,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	ctxt->wr_op = IB_WR_SEND;
 	ctxt->direction = DMA_TO_DEVICE;
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
 	ctxt->sge[0].length = sndbuf->len;
 	ctxt->sge[0].addr =
 	    ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
...
@@ -144,6 +144,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
 		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
 		head->arg.page_len += len;
 		head->arg.len += len;
+
 		if (!pg_off)
 			head->count++;
@@ -160,8 +161,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
 			goto err;
 		atomic_inc(&xprt->sc_dma_used);
 
-		/* The lkey here is either a local dma lkey or a dma_mr lkey */
-		ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
+		ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->sge[pno].length = len;
 		ctxt->count++;
...
@@ -265,7 +265,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 				 sge[sge_no].addr))
 			goto err;
 		atomic_inc(&xprt->sc_dma_used);
-		sge[sge_no].lkey = xprt->sc_dma_lkey;
+		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->count++;
 		sge_off = 0;
 		sge_no++;
@@ -480,7 +480,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	ctxt->count = 1;
 
 	/* Prepare the SGE for the RPCRDMA Header */
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
 	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
 	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
@@ -504,7 +504,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 				 ctxt->sge[sge_no].addr))
 			goto err;
 		atomic_inc(&rdma->sc_dma_used);
-		ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
+		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
 		ctxt->sge[sge_no].length = sge_bytes;
 	}
 	if (byte_count != 0) {
...
@@ -232,11 +232,11 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
 		/*
 		 * Unmap the DMA addr in the SGE if the lkey matches
-		 * the sc_dma_lkey, otherwise, ignore it since it is
+		 * the local_dma_lkey, otherwise, ignore it since it is
 		 * an FRMR lkey and will be unmapped later when the
 		 * last WR that uses it completes.
 		 */
-		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
+		if (ctxt->sge[i].lkey == xprt->sc_pd->local_dma_lkey) {
 			atomic_dec(&xprt->sc_dma_used);
 			ib_dma_unmap_page(xprt->sc_cm_id->device,
 					  ctxt->sge[i].addr,
@@ -698,7 +698,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 		atomic_inc(&xprt->sc_dma_used);
 		ctxt->sge[sge_no].addr = pa;
 		ctxt->sge[sge_no].length = PAGE_SIZE;
-		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
+		ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->count = sge_no + 1;
 		buflen += PAGE_SIZE;
 	}
@@ -1014,8 +1014,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct ib_cq_init_attr cq_attr = {};
 	struct ib_qp_init_attr qp_attr;
 	struct ib_device *dev;
-	int uninitialized_var(dma_mr_acc);
-	int need_dma_mr = 0;
 	unsigned int i;
 	int ret = 0;
@@ -1160,32 +1158,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
 		goto errout;
 
-	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
-	    !(dev->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
-		need_dma_mr = 1;
-		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-		if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
-		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
-			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
-	}
-
 	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
 
-	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
-	if (need_dma_mr) {
-		/* Register all of physical memory */
-		newxprt->sc_phys_mr =
-			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
-		if (IS_ERR(newxprt->sc_phys_mr)) {
-			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
-				ret);
-			goto errout;
-		}
-		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
-	} else
-		newxprt->sc_dma_lkey = dev->local_dma_lkey;
-
 	/* Post receive buffers */
 	for (i = 0; i < newxprt->sc_rq_depth; i++) {
 		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
@@ -1349,9 +1324,6 @@ static void __svc_rdma_free(struct work_struct *work)
 	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
 		ib_destroy_cq(rdma->sc_rq_cq);
 
-	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
-		ib_dereg_mr(rdma->sc_phys_mr);
-
 	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
 		ib_dealloc_pd(rdma->sc_pd);
@@ -1479,7 +1451,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 		return;
 	}
 	atomic_inc(&xprt->sc_dma_used);
-	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
+	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
 	ctxt->sge[0].length = length;
 
 	/* Prepare SEND WR */
...