Commit 03fe9931 authored by Chuck Lever, committed by Doug Ledford

svcrdma: Define maximum number of backchannel requests

Extra resources for handling backchannel requests have to be
pre-allocated when a transport instance is created. Set up
additional fields in svcxprt_rdma to track these resources.

The max_requests fields are elements of the RPC-over-RDMA
protocol, so they should be u32. To ensure that unsigned
arithmetic is used everywhere, some other fields in the
svcxprt_rdma struct are updated.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Bruce Fields <bfields@fieldses.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent ba986c96
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
/* RPC/RDMA parameters and stats */ /* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord; extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests; extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size; extern unsigned int svcrdma_max_req_size;
extern atomic_t rdma_stat_recv; extern atomic_t rdma_stat_recv;
...@@ -134,10 +135,11 @@ struct svcxprt_rdma { ...@@ -134,10 +135,11 @@ struct svcxprt_rdma {
int sc_max_sge; int sc_max_sge;
int sc_max_sge_rd; /* max sge for read target */ int sc_max_sge_rd; /* max sge for read target */
int sc_sq_depth; /* Depth of SQ */
atomic_t sc_sq_count; /* Number of SQ WR on queue */ atomic_t sc_sq_count; /* Number of SQ WR on queue */
unsigned int sc_sq_depth; /* Depth of SQ */
int sc_max_requests; /* Depth of RQ */ unsigned int sc_rq_depth; /* Depth of RQ */
u32 sc_max_requests; /* Forward credits */
u32 sc_max_bc_requests;/* Backward credits */
int sc_max_req_size; /* Size of each RQ WR buf */ int sc_max_req_size; /* Size of each RQ WR buf */
struct ib_pd *sc_pd; struct ib_pd *sc_pd;
...@@ -186,6 +188,11 @@ struct svcxprt_rdma { ...@@ -186,6 +188,11 @@ struct svcxprt_rdma {
#define RPCRDMA_MAX_REQUESTS 32 #define RPCRDMA_MAX_REQUESTS 32
#define RPCRDMA_MAX_REQ_SIZE 4096 #define RPCRDMA_MAX_REQ_SIZE 4096
/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
* current NFSv4.1 implementation supports one backchannel slot.
*/
#define RPCRDMA_MAX_BC_REQUESTS 2
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
/* svc_rdma_marshal.c */ /* svc_rdma_marshal.c */
......
...@@ -55,6 +55,7 @@ unsigned int svcrdma_ord = RPCRDMA_ORD; ...@@ -55,6 +55,7 @@ unsigned int svcrdma_ord = RPCRDMA_ORD;
static unsigned int min_ord = 1; static unsigned int min_ord = 1;
static unsigned int max_ord = 4096; static unsigned int max_ord = 4096;
unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS; unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS;
unsigned int svcrdma_max_bc_requests = RPCRDMA_MAX_BC_REQUESTS;
static unsigned int min_max_requests = 4; static unsigned int min_max_requests = 4;
static unsigned int max_max_requests = 16384; static unsigned int max_max_requests = 16384;
unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
...@@ -245,9 +246,10 @@ int svc_rdma_init(void) ...@@ -245,9 +246,10 @@ int svc_rdma_init(void)
{ {
dprintk("SVCRDMA Module Init, register RPC RDMA transport\n"); dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord); dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord);
dprintk("\tmax_requests : %d\n", svcrdma_max_requests); dprintk("\tmax_requests : %u\n", svcrdma_max_requests);
dprintk("\tsq_depth : %d\n", dprintk("\tsq_depth : %u\n",
svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT); svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
dprintk("\tmax_bc_requests : %u\n", svcrdma_max_bc_requests);
dprintk("\tmax_inline : %d\n", svcrdma_max_req_size); dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);
svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0); svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
......
...@@ -169,12 +169,12 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt, ...@@ -169,12 +169,12 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt) static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
{ {
int i; unsigned int i;
/* Each RPC/RDMA credit can consume a number of send /* Each RPC/RDMA credit can consume a number of send
* and receive WQEs. One ctxt is allocated for each. * and receive WQEs. One ctxt is allocated for each.
*/ */
i = xprt->sc_sq_depth + xprt->sc_max_requests; i = xprt->sc_sq_depth + xprt->sc_rq_depth;
while (i--) { while (i--) {
struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_op_ctxt *ctxt;
...@@ -285,7 +285,7 @@ static struct svc_rdma_req_map *alloc_req_map(gfp_t flags) ...@@ -285,7 +285,7 @@ static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)
static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt) static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
{ {
int i; unsigned int i;
/* One for each receive buffer on this connection. */ /* One for each receive buffer on this connection. */
i = xprt->sc_max_requests; i = xprt->sc_max_requests;
...@@ -1016,8 +1016,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -1016,8 +1016,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
struct ib_device *dev; struct ib_device *dev;
int uninitialized_var(dma_mr_acc); int uninitialized_var(dma_mr_acc);
int need_dma_mr = 0; int need_dma_mr = 0;
unsigned int i;
int ret = 0; int ret = 0;
int i;
listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
clear_bit(XPT_CONN, &xprt->xpt_flags); clear_bit(XPT_CONN, &xprt->xpt_flags);
...@@ -1046,9 +1046,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -1046,9 +1046,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd, newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
RPCSVC_MAXPAGES); RPCSVC_MAXPAGES);
newxprt->sc_max_req_size = svcrdma_max_req_size; newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = min((size_t)dev->attrs.max_qp_wr, newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
(size_t)svcrdma_max_requests); svcrdma_max_requests);
newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests; newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
svcrdma_max_bc_requests);
newxprt->sc_rq_depth = newxprt->sc_max_requests +
newxprt->sc_max_bc_requests;
newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;
if (!svc_rdma_prealloc_ctxts(newxprt)) if (!svc_rdma_prealloc_ctxts(newxprt))
goto errout; goto errout;
...@@ -1077,7 +1081,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -1077,7 +1081,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dprintk("svcrdma: error creating SQ CQ for connect request\n"); dprintk("svcrdma: error creating SQ CQ for connect request\n");
goto errout; goto errout;
} }
cq_attr.cqe = newxprt->sc_max_requests; cq_attr.cqe = newxprt->sc_rq_depth;
newxprt->sc_rq_cq = ib_create_cq(dev, newxprt->sc_rq_cq = ib_create_cq(dev,
rq_comp_handler, rq_comp_handler,
cq_event_handler, cq_event_handler,
...@@ -1092,7 +1096,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -1092,7 +1096,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
qp_attr.event_handler = qp_event_handler; qp_attr.event_handler = qp_event_handler;
qp_attr.qp_context = &newxprt->sc_xprt; qp_attr.qp_context = &newxprt->sc_xprt;
qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
qp_attr.cap.max_recv_wr = newxprt->sc_max_requests; qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
qp_attr.cap.max_send_sge = newxprt->sc_max_sge; qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
...@@ -1183,7 +1187,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -1183,7 +1187,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
newxprt->sc_dma_lkey = dev->local_dma_lkey; newxprt->sc_dma_lkey = dev->local_dma_lkey;
/* Post receive buffers */ /* Post receive buffers */
for (i = 0; i < newxprt->sc_max_requests; i++) { for (i = 0; i < newxprt->sc_rq_depth; i++) {
ret = svc_rdma_post_recv(newxprt, GFP_KERNEL); ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
if (ret) { if (ret) {
dprintk("svcrdma: failure posting receive buffers\n"); dprintk("svcrdma: failure posting receive buffers\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment