Commit 96ceddea authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Remove usage of "mw"

Clean up: struct rpcrdma_mw was named after Memory Windows, but
xprtrdma no longer supports a Memory Window registration mode.
Rename rpcrdma_mw and its fields to reduce confusion and make
the code more sensible to read.

Renaming "mw" was suggested by Tom Talpey, the author of the
original xprtrdma implementation. It's a good idea, but I haven't
done this until now because it's a huge diffstat for no benefit
other than code readability.

However, I'm about to introduce static trace points that expose
a few of xprtrdma's internal data structures. Those structures should
make sense in trace reports, and it's reasonable to treat trace points
as a kernel API contract that would be difficult to change later.

While I'm churning things up, two additional changes:
- rename variables unhelpfully called "r" to "mr", to improve code
  clarity, and
- rename the MR-related helper functions using the form
  "rpcrdma_mr_<verb>", to be consistent with other areas of the
  code.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent ce5b3717
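For reference, the renames visible in the diff below are, in brief:
- struct rpcrdma_mw (with fields mw_list, mw_sg, mw_nents, mw_dir, mw_xprt,
  mw_handle, mw_length, mw_offset, mw_all) becomes struct rpcrdma_mr with
  matching mr_* fields
- rpcrdma_get_mw/rpcrdma_put_mw become rpcrdma_mr_get/rpcrdma_mr_put
- rpcrdma_push_mw/rpcrdma_pop_mw become rpcrdma_mr_push/rpcrdma_mr_pop
- rpcrdma_defer_mr_recovery becomes rpcrdma_mr_defer_recovery
- rpcrdma_create_mrs/rpcrdma_destroy_mrs become rpcrdma_mrs_create/rpcrdma_mrs_destroy
- struct rpcrdma_buffer's rb_mwlock and rb_mws become rb_mrlock and rb_mrs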
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  */
@@ -47,7 +47,7 @@ fmr_is_supported(struct rpcrdma_ia *ia)
 }

 static int
-fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
+fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
 {
         static struct ib_fmr_attr fmr_attr = {
                 .max_pages = RPCRDMA_MAX_FMR_SGES,
@@ -55,106 +55,106 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
                 .page_shift = PAGE_SHIFT
         };

-        mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+        mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
                                        sizeof(u64), GFP_KERNEL);
-        if (!mw->fmr.fm_physaddrs)
+        if (!mr->fmr.fm_physaddrs)
                 goto out_free;

-        mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
-                            sizeof(*mw->mw_sg), GFP_KERNEL);
-        if (!mw->mw_sg)
+        mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+                            sizeof(*mr->mr_sg), GFP_KERNEL);
+        if (!mr->mr_sg)
                 goto out_free;

-        sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);
+        sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);

-        mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
+        mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
                                      &fmr_attr);
-        if (IS_ERR(mw->fmr.fm_mr))
+        if (IS_ERR(mr->fmr.fm_mr))
                 goto out_fmr_err;

         return 0;

 out_fmr_err:
         dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
-                PTR_ERR(mw->fmr.fm_mr));
+                PTR_ERR(mr->fmr.fm_mr));

 out_free:
-        kfree(mw->mw_sg);
-        kfree(mw->fmr.fm_physaddrs);
+        kfree(mr->mr_sg);
+        kfree(mr->fmr.fm_physaddrs);
         return -ENOMEM;
 }

 static int
-__fmr_unmap(struct rpcrdma_mw *mw)
+__fmr_unmap(struct rpcrdma_mr *mr)
 {
         LIST_HEAD(l);
         int rc;

-        list_add(&mw->fmr.fm_mr->list, &l);
+        list_add(&mr->fmr.fm_mr->list, &l);
         rc = ib_unmap_fmr(&l);
-        list_del(&mw->fmr.fm_mr->list);
+        list_del(&mr->fmr.fm_mr->list);
         return rc;
 }

 static void
-fmr_op_release_mr(struct rpcrdma_mw *r)
+fmr_op_release_mr(struct rpcrdma_mr *mr)
 {
         LIST_HEAD(unmap_list);
         int rc;

         /* Ensure MW is not on any rl_registered list */
-        if (!list_empty(&r->mw_list))
-                list_del(&r->mw_list);
+        if (!list_empty(&mr->mr_list))
+                list_del(&mr->mr_list);

-        kfree(r->fmr.fm_physaddrs);
-        kfree(r->mw_sg);
+        kfree(mr->fmr.fm_physaddrs);
+        kfree(mr->mr_sg);

         /* In case this one was left mapped, try to unmap it
          * to prevent dealloc_fmr from failing with EBUSY
          */
-        rc = __fmr_unmap(r);
+        rc = __fmr_unmap(mr);
         if (rc)
                 pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
-                       r, rc);
+                       mr, rc);

-        rc = ib_dealloc_fmr(r->fmr.fm_mr);
+        rc = ib_dealloc_fmr(mr->fmr.fm_mr);
         if (rc)
                 pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
-                       r, rc);
+                       mr, rc);

-        kfree(r);
+        kfree(mr);
 }

 /* Reset of a single FMR.
  */
 static void
-fmr_op_recover_mr(struct rpcrdma_mw *mw)
+fmr_op_recover_mr(struct rpcrdma_mr *mr)
 {
-        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
         int rc;

         /* ORDER: invalidate first */
-        rc = __fmr_unmap(mw);
+        rc = __fmr_unmap(mr);

         /* ORDER: then DMA unmap */
         ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
-                        mw->mw_sg, mw->mw_nents, mw->mw_dir);
+                        mr->mr_sg, mr->mr_nents, mr->mr_dir);
         if (rc)
                 goto out_release;

-        rpcrdma_put_mw(r_xprt, mw);
+        rpcrdma_mr_put(mr);
         r_xprt->rx_stats.mrs_recovered++;
         return;

 out_release:
-        pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
+        pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
         r_xprt->rx_stats.mrs_orphaned++;

-        spin_lock(&r_xprt->rx_buf.rb_mwlock);
-        list_del(&mw->mw_all);
-        spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+        spin_lock(&r_xprt->rx_buf.rb_mrlock);
+        list_del(&mr->mr_all);
+        spin_unlock(&r_xprt->rx_buf.rb_mrlock);

-        fmr_op_release_mr(mw);
+        fmr_op_release_mr(mr);
 }

 static int
@@ -180,15 +180,15 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
  */
 static struct rpcrdma_mr_seg *
 fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
-           int nsegs, bool writing, struct rpcrdma_mw **out)
+           int nsegs, bool writing, struct rpcrdma_mr **out)
 {
         struct rpcrdma_mr_seg *seg1 = seg;
         int len, pageoff, i, rc;
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;
         u64 *dma_pages;

-        mw = rpcrdma_get_mw(r_xprt);
-        if (!mw)
+        mr = rpcrdma_mr_get(r_xprt);
+        if (!mr)
                 return ERR_PTR(-ENOBUFS);

         pageoff = offset_in_page(seg1->mr_offset);
@@ -199,12 +199,12 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                 nsegs = RPCRDMA_MAX_FMR_SGES;
         for (i = 0; i < nsegs;) {
                 if (seg->mr_page)
-                        sg_set_page(&mw->mw_sg[i],
+                        sg_set_page(&mr->mr_sg[i],
                                     seg->mr_page,
                                     seg->mr_len,
                                     offset_in_page(seg->mr_offset));
                 else
-                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+                        sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
                                    seg->mr_len);
                 len += seg->mr_len;
                 ++seg;
@@ -214,40 +214,40 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                     offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                         break;
         }
-        mw->mw_dir = rpcrdma_data_dir(writing);
+        mr->mr_dir = rpcrdma_data_dir(writing);

-        mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
-                                     mw->mw_sg, i, mw->mw_dir);
-        if (!mw->mw_nents)
+        mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+                                     mr->mr_sg, i, mr->mr_dir);
+        if (!mr->mr_nents)
                 goto out_dmamap_err;

-        for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
-                dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
-        rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
+        for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
+                dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
+        rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
                              dma_pages[0]);
         if (rc)
                 goto out_maperr;

-        mw->mw_handle = mw->fmr.fm_mr->rkey;
-        mw->mw_length = len;
-        mw->mw_offset = dma_pages[0] + pageoff;
+        mr->mr_handle = mr->fmr.fm_mr->rkey;
+        mr->mr_length = len;
+        mr->mr_offset = dma_pages[0] + pageoff;

-        *out = mw;
+        *out = mr;
         return seg;

 out_dmamap_err:
         pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
-               mw->mw_sg, i);
-        rpcrdma_put_mw(r_xprt, mw);
+               mr->mr_sg, i);
+        rpcrdma_mr_put(mr);
         return ERR_PTR(-EIO);

 out_maperr:
         pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
                len, (unsigned long long)dma_pages[0],
-               pageoff, mw->mw_nents, rc);
+               pageoff, mr->mr_nents, rc);
         ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
-                        mw->mw_sg, mw->mw_nents, mw->mw_dir);
-        rpcrdma_put_mw(r_xprt, mw);
+                        mr->mr_sg, mr->mr_nents, mr->mr_dir);
+        rpcrdma_mr_put(mr);
         return ERR_PTR(-EIO);
 }

@@ -256,13 +256,13 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
  * Sleeps until it is safe for the host CPU to access the
  * previously mapped memory regions.
  *
- * Caller ensures that @mws is not empty before the call. This
+ * Caller ensures that @mrs is not empty before the call. This
  * function empties the list.
  */
 static void
-fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
+fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
 {
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;
         LIST_HEAD(unmap_list);
         int rc;

@@ -271,10 +271,10 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
          * ib_unmap_fmr() is slow, so use a single call instead
          * of one call per mapped FMR.
          */
-        list_for_each_entry(mw, mws, mw_list) {
+        list_for_each_entry(mr, mrs, mr_list) {
                 dprintk("RPC: %s: unmapping fmr %p\n",
-                        __func__, &mw->fmr);
-                list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
+                        __func__, &mr->fmr);
+                list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
         }
         r_xprt->rx_stats.local_inv_needed++;
         rc = ib_unmap_fmr(&unmap_list);
@@ -284,14 +284,14 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
         /* ORDER: Now DMA unmap all of the req's MRs, and return
          * them to the free MW list.
          */
-        while (!list_empty(mws)) {
-                mw = rpcrdma_pop_mw(mws);
+        while (!list_empty(mrs)) {
+                mr = rpcrdma_mr_pop(mrs);
                 dprintk("RPC: %s: DMA unmapping fmr %p\n",
-                        __func__, &mw->fmr);
-                list_del(&mw->fmr.fm_mr->list);
+                        __func__, &mr->fmr);
+                list_del(&mr->fmr.fm_mr->list);
                 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
-                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
-                rpcrdma_put_mw(r_xprt, mw);
+                                mr->mr_sg, mr->mr_nents, mr->mr_dir);
+                rpcrdma_mr_put(mr);
         }

         return;

@@ -299,10 +299,10 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 out_reset:
         pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

-        while (!list_empty(mws)) {
-                mw = rpcrdma_pop_mw(mws);
-                list_del(&mw->fmr.fm_mr->list);
-                fmr_op_recover_mr(mw);
+        while (!list_empty(mrs)) {
+                mr = rpcrdma_mr_pop(mrs);
+                list_del(&mr->fmr.fm_mr->list);
+                fmr_op_recover_mr(mr);
         }
 }
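Two details in the hunks above are worth noting, neither changed by this
patch: fmr_op_recover_mr() and fmr_op_unmap_sync() both invalidate the FMR
before DMA-unmapping the scatterlist (the ORDER comments), since the device
must be finished with the pages before the CPU reclaims the DMA mapping; and
fmr_op_unmap_sync() gathers every fm_mr onto a single unmap_list so that one
ib_unmap_fmr() call, which the comment notes is slow, covers the whole
request instead of one call per mapped FMR.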
......
[The diff for one additional file is collapsed and not shown here.]
@@ -292,15 +292,15 @@ encode_item_not_present(struct xdr_stream *xdr)
 }

 static void
-xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
 {
-        *iptr++ = cpu_to_be32(mw->mw_handle);
-        *iptr++ = cpu_to_be32(mw->mw_length);
-        xdr_encode_hyper(iptr, mw->mw_offset);
+        *iptr++ = cpu_to_be32(mr->mr_handle);
+        *iptr++ = cpu_to_be32(mr->mr_length);
+        xdr_encode_hyper(iptr, mr->mr_offset);
 }

 static int
-encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
+encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
 {
         __be32 *p;

@@ -308,12 +308,12 @@ encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
         if (unlikely(!p))
                 return -EMSGSIZE;

-        xdr_encode_rdma_segment(p, mw);
+        xdr_encode_rdma_segment(p, mr);
         return 0;
 }

 static int
-encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
+encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
                     u32 position)
 {
         __be32 *p;

@@ -324,7 +324,7 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
         *p++ = xdr_one;         /* Item present */
         *p++ = cpu_to_be32(position);
-        xdr_encode_rdma_segment(p, mw);
+        xdr_encode_rdma_segment(p, mr);
         return 0;
 }

@@ -348,7 +348,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 {
         struct xdr_stream *xdr = &req->rl_stream;
         struct rpcrdma_mr_seg *seg;
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;
         unsigned int pos;
         int nsegs;

@@ -363,21 +363,21 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
         do {
                 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-                                                   false, &mw);
+                                                   false, &mr);
                 if (IS_ERR(seg))
                         return PTR_ERR(seg);
-                rpcrdma_push_mw(mw, &req->rl_registered);
+                rpcrdma_mr_push(mr, &req->rl_registered);

-                if (encode_read_segment(xdr, mw, pos) < 0)
+                if (encode_read_segment(xdr, mr, pos) < 0)
                         return -EMSGSIZE;

                 dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
                         rqst->rq_task->tk_pid, __func__, pos,
-                        mw->mw_length, (unsigned long long)mw->mw_offset,
-                        mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
+                        mr->mr_length, (unsigned long long)mr->mr_offset,
+                        mr->mr_handle, mr->mr_nents < nsegs ? "more" : "last");

                 r_xprt->rx_stats.read_chunk_count++;
-                nsegs -= mw->mw_nents;
+                nsegs -= mr->mr_nents;
         } while (nsegs);

         return 0;
@@ -404,7 +404,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 {
         struct xdr_stream *xdr = &req->rl_stream;
         struct rpcrdma_mr_seg *seg;
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;
         int nsegs, nchunks;
         __be32 *segcount;

@@ -425,23 +425,23 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
         nchunks = 0;
         do {
                 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-                                                   true, &mw);
+                                                   true, &mr);
                 if (IS_ERR(seg))
                         return PTR_ERR(seg);
-                rpcrdma_push_mw(mw, &req->rl_registered);
+                rpcrdma_mr_push(mr, &req->rl_registered);

-                if (encode_rdma_segment(xdr, mw) < 0)
+                if (encode_rdma_segment(xdr, mr) < 0)
                         return -EMSGSIZE;

                 dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
                         rqst->rq_task->tk_pid, __func__,
-                        mw->mw_length, (unsigned long long)mw->mw_offset,
-                        mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
+                        mr->mr_length, (unsigned long long)mr->mr_offset,
+                        mr->mr_handle, mr->mr_nents < nsegs ? "more" : "last");

                 r_xprt->rx_stats.write_chunk_count++;
                 r_xprt->rx_stats.total_rdma_request += seg->mr_len;
                 nchunks++;
-                nsegs -= mw->mw_nents;
+                nsegs -= mr->mr_nents;
         } while (nsegs);

         /* Update count of segments in this Write chunk */
@@ -468,7 +468,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 {
         struct xdr_stream *xdr = &req->rl_stream;
         struct rpcrdma_mr_seg *seg;
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;
         int nsegs, nchunks;
         __be32 *segcount;

@@ -487,23 +487,23 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
         nchunks = 0;
         do {
                 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-                                                   true, &mw);
+                                                   true, &mr);
                 if (IS_ERR(seg))
                         return PTR_ERR(seg);
-                rpcrdma_push_mw(mw, &req->rl_registered);
+                rpcrdma_mr_push(mr, &req->rl_registered);

-                if (encode_rdma_segment(xdr, mw) < 0)
+                if (encode_rdma_segment(xdr, mr) < 0)
                         return -EMSGSIZE;

                 dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
                         rqst->rq_task->tk_pid, __func__,
-                        mw->mw_length, (unsigned long long)mw->mw_offset,
-                        mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
+                        mr->mr_length, (unsigned long long)mr->mr_offset,
+                        mr->mr_handle, mr->mr_nents < nsegs ? "more" : "last");

                 r_xprt->rx_stats.reply_chunk_count++;
                 r_xprt->rx_stats.total_rdma_request += seg->mr_len;
                 nchunks++;
-                nsegs -= mw->mw_nents;
+                nsegs -= mr->mr_nents;
         } while (nsegs);

         /* Update count of segments in the Reply chunk */
@@ -821,10 +821,10 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
          * so these registrations are invalid and unusable.
          */
         while (unlikely(!list_empty(&req->rl_registered))) {
-                struct rpcrdma_mw *mw;
+                struct rpcrdma_mr *mr;

-                mw = rpcrdma_pop_mw(&req->rl_registered);
-                rpcrdma_defer_mr_recovery(mw);
+                mr = rpcrdma_mr_pop(&req->rl_registered);
+                rpcrdma_mr_defer_recovery(mr);
         }

         /* This implementation supports the following combinations
......
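As the hunks above show, the renamed fields line up with what goes on the
wire: xdr_encode_rdma_segment() emits each RDMA segment as the handle,
length, and offset triple, now taken from mr_handle, mr_length, and
mr_offset, so these names will also read naturally in the trace points this
patch is preparing for.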
@@ -71,8 +71,8 @@
 /*
  * internal functions
  */
-static void rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf);
+static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
+static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
 static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

 struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
@@ -458,7 +458,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
                 rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
                 rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
         }
-        rpcrdma_destroy_mrs(buf);
+        rpcrdma_mrs_destroy(buf);

         /* Allow waiters to continue */
         complete(&ia->ri_remove_done);
@@ -671,7 +671,7 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                 goto out3;
         }

-        rpcrdma_create_mrs(r_xprt);
+        rpcrdma_mrs_create(r_xprt);
         return 0;

 out3:
@@ -992,15 +992,15 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
 {
         struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                   rb_recovery_worker.work);
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;

         spin_lock(&buf->rb_recovery_lock);
         while (!list_empty(&buf->rb_stale_mrs)) {
-                mw = rpcrdma_pop_mw(&buf->rb_stale_mrs);
+                mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
                 spin_unlock(&buf->rb_recovery_lock);

-                dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
-                mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);
+                dprintk("RPC: %s: recovering MR %p\n", __func__, mr);
+                mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

                 spin_lock(&buf->rb_recovery_lock);
         }
@@ -1008,20 +1008,20 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
 }

 void
-rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
+rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
 {
-        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

         spin_lock(&buf->rb_recovery_lock);
-        rpcrdma_push_mw(mw, &buf->rb_stale_mrs);
+        rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
         spin_unlock(&buf->rb_recovery_lock);
         schedule_delayed_work(&buf->rb_recovery_worker, 0);
 }

 static void
-rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
+rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
 {
         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
@@ -1030,30 +1030,30 @@ rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
         LIST_HEAD(all);

         for (count = 0; count < 32; count++) {
-                struct rpcrdma_mw *mw;
+                struct rpcrdma_mr *mr;
                 int rc;

-                mw = kzalloc(sizeof(*mw), GFP_KERNEL);
-                if (!mw)
+                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+                if (!mr)
                         break;

-                rc = ia->ri_ops->ro_init_mr(ia, mw);
+                rc = ia->ri_ops->ro_init_mr(ia, mr);
                 if (rc) {
-                        kfree(mw);
+                        kfree(mr);
                         break;
                 }

-                mw->mw_xprt = r_xprt;
+                mr->mr_xprt = r_xprt;

-                list_add(&mw->mw_list, &free);
-                list_add(&mw->mw_all, &all);
+                list_add(&mr->mr_list, &free);
+                list_add(&mr->mr_all, &all);
         }

-        spin_lock(&buf->rb_mwlock);
-        list_splice(&free, &buf->rb_mws);
+        spin_lock(&buf->rb_mrlock);
+        list_splice(&free, &buf->rb_mrs);
         list_splice(&all, &buf->rb_all);
         r_xprt->rx_stats.mrs_allocated += count;
-        spin_unlock(&buf->rb_mwlock);
+        spin_unlock(&buf->rb_mrlock);

         dprintk("RPC: %s: created %u MRs\n", __func__, count);
 }
@@ -1066,7 +1066,7 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
         struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                    rx_buf);

-        rpcrdma_create_mrs(r_xprt);
+        rpcrdma_mrs_create(r_xprt);
 }

 struct rpcrdma_req *
@@ -1144,10 +1144,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
         buf->rb_max_requests = r_xprt->rx_data.max_requests;
         buf->rb_bc_srv_max_requests = 0;
-        spin_lock_init(&buf->rb_mwlock);
+        spin_lock_init(&buf->rb_mrlock);
         spin_lock_init(&buf->rb_lock);
         spin_lock_init(&buf->rb_recovery_lock);
-        INIT_LIST_HEAD(&buf->rb_mws);
+        INIT_LIST_HEAD(&buf->rb_mrs);
         INIT_LIST_HEAD(&buf->rb_all);
         INIT_LIST_HEAD(&buf->rb_stale_mrs);
         INIT_DELAYED_WORK(&buf->rb_refresh_worker,
@@ -1155,7 +1155,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
         INIT_DELAYED_WORK(&buf->rb_recovery_worker,
                           rpcrdma_mr_recovery_worker);

-        rpcrdma_create_mrs(r_xprt);
+        rpcrdma_mrs_create(r_xprt);

         INIT_LIST_HEAD(&buf->rb_send_bufs);
         INIT_LIST_HEAD(&buf->rb_allreqs);
@@ -1229,26 +1229,26 @@ rpcrdma_destroy_req(struct rpcrdma_req *req)
 }

 static void
-rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
+rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
 {
         struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                    rx_buf);
         struct rpcrdma_ia *ia = rdmab_to_ia(buf);
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;
         unsigned int count;

         count = 0;
-        spin_lock(&buf->rb_mwlock);
+        spin_lock(&buf->rb_mrlock);
         while (!list_empty(&buf->rb_all)) {
-                mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
-                list_del(&mw->mw_all);
-                spin_unlock(&buf->rb_mwlock);
+                mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
+                list_del(&mr->mr_all);
+                spin_unlock(&buf->rb_mrlock);

-                ia->ri_ops->ro_release_mr(mw);
+                ia->ri_ops->ro_release_mr(mr);
                 count++;
-                spin_lock(&buf->rb_mwlock);
+                spin_lock(&buf->rb_mrlock);
         }
-        spin_unlock(&buf->rb_mwlock);
+        spin_unlock(&buf->rb_mrlock);
         r_xprt->rx_stats.mrs_allocated = 0;

         dprintk("RPC: %s: released %u MRs\n", __func__, count);
@@ -1285,26 +1285,33 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
         spin_unlock(&buf->rb_reqslock);

         buf->rb_recv_count = 0;
-        rpcrdma_destroy_mrs(buf);
+        rpcrdma_mrs_destroy(buf);
 }

-struct rpcrdma_mw *
-rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
+/**
+ * rpcrdma_mr_get - Allocate an rpcrdma_mr object
+ * @r_xprt: controlling transport
+ *
+ * Returns an initialized rpcrdma_mr or NULL if no free
+ * rpcrdma_mr objects are available.
+ */
+struct rpcrdma_mr *
+rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
 {
         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-        struct rpcrdma_mw *mw = NULL;
+        struct rpcrdma_mr *mr = NULL;

-        spin_lock(&buf->rb_mwlock);
-        if (!list_empty(&buf->rb_mws))
-                mw = rpcrdma_pop_mw(&buf->rb_mws);
-        spin_unlock(&buf->rb_mwlock);
+        spin_lock(&buf->rb_mrlock);
+        if (!list_empty(&buf->rb_mrs))
+                mr = rpcrdma_mr_pop(&buf->rb_mrs);
+        spin_unlock(&buf->rb_mrlock);

-        if (!mw)
-                goto out_nomws;
-        return mw;
+        if (!mr)
+                goto out_nomrs;
+        return mr;

-out_nomws:
-        dprintk("RPC: %s: no MWs available\n", __func__);
+out_nomrs:
+        dprintk("RPC: %s: no MRs available\n", __func__);
         if (r_xprt->rx_ep.rep_connected != -ENODEV)
                 schedule_delayed_work(&buf->rb_refresh_worker, 0);
@@ -1314,14 +1321,20 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
         return NULL;
 }

+/**
+ * rpcrdma_mr_put - Release an rpcrdma_mr object
+ * @mr: object to release
+ *
+ */
 void
-rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
+rpcrdma_mr_put(struct rpcrdma_mr *mr)
 {
+        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

-        spin_lock(&buf->rb_mwlock);
-        rpcrdma_push_mw(mw, &buf->rb_mws);
-        spin_unlock(&buf->rb_mwlock);
+        spin_lock(&buf->rb_mrlock);
+        rpcrdma_mr_push(mr, &buf->rb_mrs);
+        spin_unlock(&buf->rb_mrlock);
 }

 static struct rpcrdma_rep *
......
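Note that one hunk above is slightly more than a rename: rpcrdma_mr_put()
now takes only the MR and derives the transport from mr->mr_xprt, so callers
such as fmr_op_recover_mr() and the error paths in fmr_op_map() no longer
pass r_xprt explicitly. The patch also adds kernel-doc comments for
rpcrdma_mr_get() and rpcrdma_mr_put() while it is touching them.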
@@ -230,12 +230,12 @@ enum {
 };

 /*
- * struct rpcrdma_mw - external memory region metadata
+ * struct rpcrdma_mr - external memory region metadata
  *
  * An external memory region is any buffer or page that is registered
  * on the fly (ie, not pre-registered).
  *
- * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
+ * Each rpcrdma_buffer has a list of free MWs anchored in rb_mrs. During
  * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
  * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
  * track of registration metadata while each RPC is pending.
@@ -265,20 +265,20 @@ struct rpcrdma_fmr {
         u64                     *fm_physaddrs;
 };

-struct rpcrdma_mw {
-        struct list_head        mw_list;
-        struct scatterlist      *mw_sg;
-        int                     mw_nents;
-        enum dma_data_direction mw_dir;
+struct rpcrdma_mr {
+        struct list_head        mr_list;
+        struct scatterlist      *mr_sg;
+        int                     mr_nents;
+        enum dma_data_direction mr_dir;
         union {
                 struct rpcrdma_fmr      fmr;
                 struct rpcrdma_frwr     frwr;
         };
-        struct rpcrdma_xprt     *mw_xprt;
-        u32                     mw_handle;
-        u32                     mw_length;
-        u64                     mw_offset;
-        struct list_head        mw_all;
+        struct rpcrdma_xprt     *mr_xprt;
+        u32                     mr_handle;
+        u32                     mr_length;
+        u64                     mr_offset;
+        struct list_head        mr_all;
 };

 /*
@@ -371,19 +371,19 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
 }

 static inline void
-rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
+rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
 {
-        list_add_tail(&mw->mw_list, list);
+        list_add_tail(&mr->mr_list, list);
 }

-static inline struct rpcrdma_mw *
-rpcrdma_pop_mw(struct list_head *list)
+static inline struct rpcrdma_mr *
+rpcrdma_mr_pop(struct list_head *list)
 {
-        struct rpcrdma_mw *mw;
+        struct rpcrdma_mr *mr;

-        mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
-        list_del(&mw->mw_list);
-        return mw;
+        mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
+        list_del(&mr->mr_list);
+        return mr;
 }
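The two inline helpers above are the whole of the MR list mechanics used
throughout this patch. The following is a minimal user-space sketch, an
illustration only and not kernel code: the list primitives and the
rpcrdma_mr layout are pared down to just what the helpers touch.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Pared-down stand-in for the kernel's struct list_head */
struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *item, struct list_head *head)
{
        item->prev = head->prev;
        item->next = head;
        head->prev->next = item;
        head->prev = item;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/* Only the fields the helpers touch; mr_handle is just a label here */
struct rpcrdma_mr {
        struct list_head mr_list;
        int mr_handle;
};

/* Mirrors rpcrdma_mr_push(): append an MR to a free or registered list */
static void rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
        list_add_tail(&mr->mr_list, list);
}

/* Mirrors rpcrdma_mr_pop(): detach and return the first MR on the list */
static struct rpcrdma_mr *rpcrdma_mr_pop(struct list_head *list)
{
        struct rpcrdma_mr *mr;

        mr = container_of(list->next, struct rpcrdma_mr, mr_list);
        list_del(&mr->mr_list);
        return mr;
}

int main(void)
{
        struct list_head free_list = LIST_HEAD_INIT(free_list);
        struct rpcrdma_mr a = { .mr_handle = 1 };
        struct rpcrdma_mr b = { .mr_handle = 2 };

        rpcrdma_mr_push(&a, &free_list);
        rpcrdma_mr_push(&b, &free_list);

        /* Tail-add plus head-remove yields FIFO order */
        assert(rpcrdma_mr_pop(&free_list)->mr_handle == 1);
        assert(rpcrdma_mr_pop(&free_list)->mr_handle == 2);
        assert(list_empty(&free_list));

        printf("rpcrdma_mr_push/pop sketch OK\n");
        return 0;
}

Note that rpcrdma_mr_pop() assumes the caller already knows the list is
non-empty, exactly as the comment on fmr_op_unmap_sync() requires of @mrs.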
 /*
@@ -393,8 +393,8 @@ rpcrdma_pop_mw(struct list_head *list)
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-        spinlock_t              rb_mwlock;      /* protect rb_mws list */
-        struct list_head        rb_mws;
+        spinlock_t              rb_mrlock;      /* protect rb_mrs list */
+        struct list_head        rb_mrs;
         struct list_head        rb_all;

         unsigned long           rb_sc_head;
@@ -473,19 +473,19 @@ struct rpcrdma_memreg_ops {
         struct rpcrdma_mr_seg *
                         (*ro_map)(struct rpcrdma_xprt *,
                                   struct rpcrdma_mr_seg *, int, bool,
-                                  struct rpcrdma_mw **);
+                                  struct rpcrdma_mr **);
         void            (*ro_reminv)(struct rpcrdma_rep *rep,
-                                     struct list_head *mws);
+                                     struct list_head *mrs);
         void            (*ro_unmap_sync)(struct rpcrdma_xprt *,
                                          struct list_head *);
-        void            (*ro_recover_mr)(struct rpcrdma_mw *);
+        void            (*ro_recover_mr)(struct rpcrdma_mr *mr);
         int             (*ro_open)(struct rpcrdma_ia *,
                                    struct rpcrdma_ep *,
                                    struct rpcrdma_create_data_internal *);
         size_t          (*ro_maxpages)(struct rpcrdma_xprt *);
         int             (*ro_init_mr)(struct rpcrdma_ia *,
-                                      struct rpcrdma_mw *);
-        void            (*ro_release_mr)(struct rpcrdma_mw *);
+                                      struct rpcrdma_mr *);
+        void            (*ro_release_mr)(struct rpcrdma_mr *mr);
         const char      *ro_displayname;
         const int       ro_send_w_inv_ok;
 };
@@ -574,15 +574,15 @@ void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
 void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);

-struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
-void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
+struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
+void rpcrdma_mr_put(struct rpcrdma_mr *mr);
+void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);
 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_req *);
 void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
 void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
-void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);

 struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
                                             gfp_t);
 bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
......