Commit c3441618 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Per-mode handling for Remote Invalidation

Refactoring change: Remote Invalidation is particular to the memory
registration mode that is in use. Use a callout instead of a generic
function to handle Remote Invalidation.

This gets rid of the 8-byte flags field in struct rpcrdma_mw, of
which only a single bit flag has been allocated.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 42b9f5c5
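
To see the shape of the refactoring at a glance before reading the diff, here is a minimal userspace sketch of the pattern the patch adopts. All names here (struct reply, struct memreg_ops, deferred_completion) are hypothetical stand-ins, not the kernel's API: the completion path tests the work-completion flag and, only then, dispatches to whatever Remote Invalidation handler the active registration mode installed in its method table.

#include <stdio.h>
#include <stdbool.h>

struct reply {
	bool		wc_with_invalidate;	/* stands in for IB_WC_WITH_INVALIDATE */
	unsigned int	inv_rkey;
};

/* Per-mode method table; only modes that support Remote
 * Invalidation install a reminv handler. */
struct memreg_ops {
	void	(*ro_reminv)(struct reply *rep);
};

static void frwr_reminv(struct reply *rep)
{
	printf("FRWR: MR 0x%x was invalidated by the server\n", rep->inv_rkey);
}

static const struct memreg_ops frwr_like_ops = { .ro_reminv = frwr_reminv };

/* The completion path no longer calls one generic helper; it checks
 * the flag and invokes the mode-specific callout. */
static void deferred_completion(const struct memreg_ops *ops, struct reply *rep)
{
	if (rep->wc_with_invalidate)
		ops->ro_reminv(rep);
}

int main(void)
{
	struct reply rep = { .wc_with_invalidate = true, .inv_rkey = 0xabcd };

	deferred_completion(&frwr_like_ops, &rep);
	return 0;
}

The sketch does not NULL-check the handler; the patch presumably relies on the flag never being set on connections whose registration mode lacks Remote Invalidation support.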
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -450,6 +450,26 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	return ERR_PTR(-ENOTCONN);
 }
 
+/* Handle a remotely invalidated mw on the @mws list
+ */
+static void
+frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mws)
+{
+	struct rpcrdma_mw *mw;
+
+	list_for_each_entry(mw, mws, mw_list)
+		if (mw->mw_handle == rep->rr_inv_rkey) {
+			struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+
+			list_del(&mw->mw_list);
+			mw->frmr.fr_state = FRMR_IS_INVALID;
+			ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+					mw->mw_sg, mw->mw_nents, mw->mw_dir);
+			rpcrdma_put_mw(r_xprt, mw);
+			break;	/* only one invalidated MR per RPC */
+		}
+}
+
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the
@@ -478,9 +498,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	list_for_each_entry(mw, mws, mw_list) {
 		mw->frmr.fr_state = FRMR_IS_INVALID;
 
-		if (mw->mw_flags & RPCRDMA_MW_F_RI)
-			continue;
-
 		f = &mw->frmr;
 		dprintk("RPC: %s: invalidating frmr %p\n",
 			__func__, f);
@@ -553,6 +570,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 
 const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
 	.ro_map			= frwr_op_map,
+	.ro_reminv		= frwr_op_reminv,
 	.ro_unmap_sync		= frwr_op_unmap_sync,
 	.ro_recover_mr		= frwr_op_recover_mr,
 	.ro_open		= frwr_op_open,
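
For readers skimming the new helper: frwr_op_reminv walks the request's registered MWs for the one whose handle matches the invalidated rkey, unlinks it, DMA-unmaps it, and returns it to the pool, which is why frwr_op_unmap_sync above no longer needs the RPCRDMA_MW_F_RI skip. Below is a standalone analogue of that match-unlink-recycle walk, with hypothetical types in place of struct rpcrdma_mw and the kernel list API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for struct rpcrdma_mw on a singly linked list. */
struct mw {
	uint32_t	handle;
	struct mw	*next;
};

/* Find the single entry matching the invalidated rkey, unlink it,
 * and hand it back to the caller for recycling. */
static struct mw *reminv(struct mw **head, uint32_t inv_rkey)
{
	struct mw **pp;

	for (pp = head; *pp; pp = &(*pp)->next)
		if ((*pp)->handle == inv_rkey) {
			struct mw *found = *pp;

			*pp = found->next;	/* unlink */
			found->next = NULL;
			return found;		/* only one match expected */
		}
	return NULL;
}

int main(void)
{
	struct mw c = { 0xc0de, NULL }, b = { 0xbeef, &c }, a = { 0xabcd, &b };
	struct mw *head = &a;
	struct mw *hit = reminv(&head, 0xbeef);

	printf("recycled mw with handle 0x%x\n", hit ? (unsigned)hit->handle : 0);
	return 0;
}

Like the kernel code, the walk stops at the first match, since at most one MR per RPC is remotely invalidated.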
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -984,24 +984,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 	return fixup_copy_count;
 }
 
-/* Caller must guarantee @rep remains stable during this call.
- */
-static void
-rpcrdma_mark_remote_invalidation(struct list_head *mws,
-				 struct rpcrdma_rep *rep)
-{
-	struct rpcrdma_mw *mw;
-
-	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
-		return;
-
-	list_for_each_entry(mw, mws, mw_list)
-		if (mw->mw_handle == rep->rr_inv_rkey) {
-			mw->mw_flags = RPCRDMA_MW_F_RI;
-			break; /* only one invalidated MR per RPC */
-		}
-}
-
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes
  * the RPC/RDMA header small and fixed in size, so it is
@@ -1339,9 +1321,11 @@ void rpcrdma_deferred_completion(struct work_struct *work)
 	struct rpcrdma_rep *rep =
 			container_of(work, struct rpcrdma_rep, rr_work);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
+	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
 
-	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
-	rpcrdma_release_rqst(rep->rr_rxprt, req);
+	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
+		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
+	rpcrdma_release_rqst(r_xprt, req);
 	rpcrdma_complete_rqst(rep);
 }
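
The callback above recovers its rpcrdma_rep from the embedded work_struct with container_of. A self-contained userspace rendition of that idiom follows; the struct here is illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Userspace rendition of the kernel's container_of: given a pointer
 * to a member, recover a pointer to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rep {
	int	id;
	int	work;	/* stands in for the embedded work_struct */
};

/* The callback receives only a pointer to the member... */
static void callback(int *work)
{
	/* ...and climbs back to the containing struct rep. */
	struct rep *r = container_of(work, struct rep, work);

	printf("rep id = %d\n", r->id);
}

int main(void)
{
	struct rep r = { .id = 42 };

	callback(&r.work);
	return 0;
}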
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1307,7 +1307,6 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 	if (!mw)
 		goto out_nomws;
-	mw->mw_flags = 0;
 	return mw;
 
 out_nomws:
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -272,7 +272,6 @@ struct rpcrdma_mw {
 	struct scatterlist	*mw_sg;
 	int			mw_nents;
 	enum dma_data_direction	mw_dir;
-	unsigned long		mw_flags;
 	union {
 		struct rpcrdma_fmr	fmr;
 		struct rpcrdma_frmr	frmr;
@@ -284,11 +283,6 @@ struct rpcrdma_mw {
 	struct list_head	mw_all;
 };
 
-/* mw_flags */
-enum {
-	RPCRDMA_MW_F_RI		= 1,
-};
-
 /*
  * struct rpcrdma_req -- structure central to the request/reply sequence.
  *
@@ -485,6 +479,8 @@ struct rpcrdma_memreg_ops {
 			(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool,
				  struct rpcrdma_mw **);
+	void		(*ro_reminv)(struct rpcrdma_rep *rep,
+				     struct list_head *mws);
 	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct list_head *);
 	void		(*ro_recover_mr)(struct rpcrdma_mw *);
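
One detail worth noting about the ops-table change: because rpcrdma_frwr_memreg_ops and its siblings use C designated initializers, a registration mode that does not implement ro_reminv leaves the new pointer NULL with no source change at all, as the fmr ops table presumably does. A tiny demonstration of that initialization rule, using a hypothetical miniature of the ops struct:

#include <stdio.h>

/* Hypothetical miniature of struct rpcrdma_memreg_ops. */
struct ops {
	void	(*ro_map)(void);
	void	(*ro_reminv)(void);	/* new optional callout */
};

static void map(void) { puts("map"); }

/* Members omitted from a designated initializer are zero-initialized
 * (C99 6.7.8), so an unimplemented callout defaults to NULL. */
static const struct ops fmr_like_ops = { .ro_map = map };

int main(void)
{
	printf("ro_reminv is %s\n", fmr_like_ops.ro_reminv ? "set" : "NULL");
	return 0;
}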