Commit cafc7398 authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Use generic RDMA R/W API in RPC Call path

The current svcrdma recvfrom code path has a lot of detail about
registration modes and the type of port (iWARP, IB, etc.).

Instead, use the RDMA core's generic R/W API. This shares with other
RDMA-enabled ULPs the code that manages the gory details of buffer
registration and the posting of RDMA Read Work Requests.
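
A minimal sketch of what a consumer of the generic R/W API looks like
(illustrative only, not code from this commit; the helper name, its
parameters, and the error handling are assumptions -- only the
rdma_rw_ctx_* calls are the actual kernel API):

#include <rdma/rw.h>

/* Pull sg_cnt pages of payload from the remote peer's memory.
 * The rdma_rw core decides whether plain RDMA Read WRs suffice or
 * whether the local buffer must be registered first (e.g. on iWARP),
 * so the ULP no longer branches on registration mode or port type.
 */
static int ulp_read_remote(struct ib_qp *qp, u8 port_num,
			   struct scatterlist *sgl, u32 sg_cnt,
			   u64 remote_addr, u32 rkey,
			   struct ib_cqe *done_cqe)
{
	struct rdma_rw_ctx ctx;
	int ret;

	/* DMA-map the local pages and build the Read WR chain;
	 * returns the number of WQEs consumed, or a negative errno.
	 */
	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* Post the chain; done_cqe->done runs when the last Read
	 * completes.
	 */
	ret = rdma_rw_ctx_post(&ctx, qp, port_num, done_cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt,
				    DMA_FROM_DEVICE);
	return ret;
}

On success the context stays mapped until the completion handler runs,
so a real consumer would keep the ctx and scatterlist alive and call
rdma_rw_ctx_destroy() from there.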

Since the Read list marshaling code is being replaced, I took the
opportunity to replace C structure-based XDR encoding code with more
portable code that uses pointer arithmetic.
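
For instance (an illustrative contrast, not code from this commit --
encode_read_segment() is a made-up name), instead of casting the wire
buffer to a C structure such as struct rpcrdma_read_chunk and assigning
to its fields, the encoder walks a __be32 pointer:

#include <linux/sunrpc/xdr.h>

/* Encode one RDMA read segment: handle, length, then the 64-bit
 * offset, one XDR word at a time. No struct layout assumptions,
 * so no dependence on compiler padding rules.
 */
static __be32 *encode_read_segment(__be32 *p, u32 handle,
				   u32 length, u64 offset)
{
	*p++ = cpu_to_be32(handle);
	*p++ = cpu_to_be32(length);
	return xdr_encode_hyper(p, offset);
}
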
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 026d958b
include/linux/sunrpc/svc_rdma.h
@@ -82,10 +82,7 @@ struct svc_rdma_op_ctxt {
 	int hdr_count;
 	struct xdr_buf arg;
 	struct ib_cqe cqe;
-	struct ib_cqe reg_cqe;
-	struct ib_cqe inv_cqe;
 	u32 byte_len;
-	u32 position;
 	struct svcxprt_rdma *xprt;
 	unsigned long flags;
 	enum dma_data_direction direction;
@@ -116,7 +113,6 @@ struct svcxprt_rdma {
 	struct list_head sc_accept_q;	/* Conn. waiting accept */
 	int		 sc_ord;	/* RDMA read limit */
 	int		 sc_max_sge;
-	int		 sc_max_sge_rd;	/* max sge for read target */
 	bool		 sc_snd_w_inv;	/* OK to use Send With Invalidate */
 
 	atomic_t	 sc_sq_avail;	/* SQEs ready to be consumed */
@@ -141,10 +137,6 @@ struct svcxprt_rdma {
 	struct ib_qp *sc_qp;
 	struct ib_cq *sc_rq_cq;
 	struct ib_cq *sc_sq_cq;
-	int (*sc_reader)(struct svcxprt_rdma *,
-			 struct svc_rqst *,
-			 struct svc_rdma_op_ctxt *,
-			 int *, u32 *, u32, u32, u64, bool);
 	u32 sc_dev_caps;	/* distilled device caps */
 	unsigned int sc_frmr_pg_list_len;
 	struct list_head sc_frmr_q;
@@ -187,12 +179,6 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
 
 /* svc_rdma_recvfrom.c */
 extern int svc_rdma_recvfrom(struct svc_rqst *);
-extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
-			       struct svc_rdma_op_ctxt *, int *, u32 *,
-			       u32, u32, u64, bool);
-extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
-				struct svc_rdma_op_ctxt *, int *, u32 *,
-				u32, u32, u64, bool);
 
 /* svc_rdma_rw.c */
 extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
(A larger diff is collapsed in this view.)
net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -908,8 +908,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	 * capabilities of this particular device */
 	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
 				  (size_t)RPCSVC_MAXPAGES);
-	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
-				       RPCSVC_MAXPAGES);
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
 					 svcrdma_max_requests);
@@ -998,12 +996,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	 * NB: iWARP requires remote write access for the data sink
 	 * of an RDMA_READ. IB does not.
 	 */
-	newxprt->sc_reader = rdma_read_chunk_lcl;
 	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
 		newxprt->sc_frmr_pg_list_len =
 			dev->attrs.max_fast_reg_page_list_len;
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
-		newxprt->sc_reader = rdma_read_chunk_frmr;
 	} else
 		newxprt->sc_snd_w_inv = false;
@@ -1056,7 +1052,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
 	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
 	dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
-	dprintk("    max_sge_rd      : %d\n", newxprt->sc_max_sge_rd);
 	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
 	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
 	dprintk("    ord             : %d\n", newxprt->sc_ord);
@@ -1117,12 +1112,6 @@ static void __svc_rdma_free(struct work_struct *work)
 		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
 		       kref_read(&xprt->xpt_ref));
 
-	/*
-	 * Destroy queued, but not processed read completions. Note
-	 * that this cleanup has to be done before destroying the
-	 * cm_id because the device ptr is needed to unmap the dma in
-	 * svc_rdma_put_context.
-	 */
 	while (!list_empty(&rdma->sc_read_complete_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_first_entry(&rdma->sc_read_complete_q,
@@ -1130,8 +1119,6 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del(&ctxt->list);
 		svc_rdma_put_context(ctxt, 1);
 	}
-
-	/* Destroy queued, but not processed recv completions */
 	while (!list_empty(&rdma->sc_rq_dto_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_first_entry(&rdma->sc_rq_dto_q,