Commit e28ce900 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: kmalloc rpcrdma_ep separate from rpcrdma_xprt

Change the rpcrdma_xprt_disconnect() function so that it no longer
waits for the DISCONNECTED event.  This prevents blocking if the
remote is unresponsive.

In rpcrdma_xprt_disconnect(), the transport's rpcrdma_ep is
detached. Upon return from rpcrdma_xprt_disconnect(), the transport
(r_xprt) is ready immediately for a new connection.

The RDMA_CM_DEVICE_REMOVAL and RDMA_CM_DISCONNECTED events are now
handled almost identically.

However, because the lifetimes of rpcrdma_xprt structures and
rpcrdma_ep structures are now independent, creating an rpcrdma_ep
needs to take a module ref count. The ep now owns most of the
hardware resources for a transport.

Also, a kref is needed to ensure that rpcrdma_ep sticks around
long enough for the cm_event_handler to finish.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 745b734c
...@@ -104,7 +104,7 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class, ...@@ -104,7 +104,7 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
TP_fast_assign( TP_fast_assign(
__entry->r_xprt = r_xprt; __entry->r_xprt = r_xprt;
__entry->rc = rc; __entry->rc = rc;
__entry->connect_status = r_xprt->rx_ep.re_connect_status; __entry->connect_status = r_xprt->rx_ep->re_connect_status;
__assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt));
), ),
...@@ -342,37 +342,6 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event, ...@@ -342,37 +342,6 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event,
** Connection events ** Connection events
**/ **/
/* Tracepoint fired for RDMA connection-manager (CM) events on a transport.
 * Records the r_xprt pointer, the CM event code and status, and the peer's
 * address/port strings.  NOTE(review): this block is shown as deleted by the
 * surrounding diff (hunk header "-342,37 +342,6"); the patch removes this
 * tracepoint entirely.
 */
TRACE_EVENT(xprtrdma_cm_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),
	TP_ARGS(r_xprt, event),
	/* Per-event record layout: transport pointer, CM event number,
	 * event status, and the transport's printable address and port.
	 */
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	/* rdma_show_cm_event() turns the numeric event into a symbolic name;
	 * the raw event number and status are printed alongside it.
	 */
	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);
TRACE_EVENT(xprtrdma_inline_thresh, TRACE_EVENT(xprtrdma_inline_thresh,
TP_PROTO( TP_PROTO(
const struct rpcrdma_ep *ep const struct rpcrdma_ep *ep
...@@ -409,34 +378,6 @@ TRACE_EVENT(xprtrdma_inline_thresh, ...@@ -409,34 +378,6 @@ TRACE_EVENT(xprtrdma_inline_thresh,
) )
); );
/* Tracepoint fired when an RDMA device underlying an endpoint is removed.
 * Captures the connection's source/destination socket addresses and the
 * device name from the endpoint's rdma_cm_id.  NOTE(review): this block is
 * shown as deleted by the surrounding diff (hunk header "-409,34 +378,6");
 * the patch removes this tracepoint.
 */
TRACE_EVENT(xprtrdma_remove,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),
	TP_ARGS(ep),
	/* Raw sockaddr storage is sized for sockaddr_in6, the largest
	 * address family handled here; name is the IB device's name.
	 */
	TP_STRUCT__entry(
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
		__string(name, ep->re_id->device->name)
	),
	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;
		/* Copy both endpoint addresses out of the CM id's route. */
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
		__assign_str(name, id->device->name);
	),
	/* %pISpc prints a sockaddr as address:port. */
	TP_printk("%pISpc -> %pISpc device=%s",
		__entry->srcaddr, __entry->dstaddr, __get_str(name)
	)
);
DEFINE_CONN_EVENT(connect); DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect); DEFINE_CONN_EVENT(disconnect);
DEFINE_CONN_EVENT(flush_dct); DEFINE_CONN_EVENT(flush_dct);
...@@ -831,7 +772,7 @@ TRACE_EVENT(xprtrdma_post_recvs, ...@@ -831,7 +772,7 @@ TRACE_EVENT(xprtrdma_post_recvs,
__entry->r_xprt = r_xprt; __entry->r_xprt = r_xprt;
__entry->count = count; __entry->count = count;
__entry->status = status; __entry->status = status;
__entry->posted = r_xprt->rx_ep.re_receive_count; __entry->posted = r_xprt->rx_ep->re_receive_count;
__assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt));
), ),
......
...@@ -44,7 +44,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs) ...@@ -44,7 +44,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt) size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{ {
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_ep *ep = &r_xprt->rx_ep; struct rpcrdma_ep *ep = r_xprt->rx_ep;
size_t maxmsg; size_t maxmsg;
maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv); maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
...@@ -190,7 +190,7 @@ static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt) ...@@ -190,7 +190,7 @@ static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS) if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
return NULL; return NULL;
size = min_t(size_t, r_xprt->rx_ep.re_inline_recv, PAGE_SIZE); size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL); req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
if (!req) if (!req)
return NULL; return NULL;
......
...@@ -74,7 +74,7 @@ static void frwr_mr_recycle(struct rpcrdma_mr *mr) ...@@ -74,7 +74,7 @@ static void frwr_mr_recycle(struct rpcrdma_mr *mr)
if (mr->mr_dir != DMA_NONE) { if (mr->mr_dir != DMA_NONE) {
trace_xprtrdma_mr_unmap(mr); trace_xprtrdma_mr_unmap(mr);
ib_dma_unmap_sg(r_xprt->rx_ep.re_id->device, ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
mr->mr_sg, mr->mr_nents, mr->mr_dir); mr->mr_sg, mr->mr_nents, mr->mr_dir);
mr->mr_dir = DMA_NONE; mr->mr_dir = DMA_NONE;
} }
...@@ -115,7 +115,7 @@ void frwr_reset(struct rpcrdma_req *req) ...@@ -115,7 +115,7 @@ void frwr_reset(struct rpcrdma_req *req)
*/ */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{ {
struct rpcrdma_ep *ep = &r_xprt->rx_ep; struct rpcrdma_ep *ep = r_xprt->rx_ep;
unsigned int depth = ep->re_max_fr_depth; unsigned int depth = ep->re_max_fr_depth;
struct scatterlist *sg; struct scatterlist *sg;
struct ib_mr *frmr; struct ib_mr *frmr;
...@@ -283,7 +283,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, ...@@ -283,7 +283,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
int nsegs, bool writing, __be32 xid, int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr *mr) struct rpcrdma_mr *mr)
{ {
struct rpcrdma_ep *ep = &r_xprt->rx_ep; struct rpcrdma_ep *ep = r_xprt->rx_ep;
struct ib_reg_wr *reg_wr; struct ib_reg_wr *reg_wr;
int i, n, dma_nents; int i, n, dma_nents;
struct ib_mr *ibmr; struct ib_mr *ibmr;
...@@ -405,7 +405,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -405,7 +405,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
post_wr = &frwr->fr_regwr.wr; post_wr = &frwr->fr_regwr.wr;
} }
return ib_post_send(r_xprt->rx_ep.re_id->qp, post_wr, NULL); return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
} }
/** /**
...@@ -535,7 +535,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -535,7 +535,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* unless re_id->qp is a valid pointer. * unless re_id->qp is a valid pointer.
*/ */
bad_wr = NULL; bad_wr = NULL;
rc = ib_post_send(r_xprt->rx_ep.re_id->qp, first, &bad_wr); rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
/* The final LOCAL_INV WR in the chain is supposed to /* The final LOCAL_INV WR in the chain is supposed to
* do the wake. If it was never posted, the wake will * do the wake. If it was never posted, the wake will
...@@ -640,7 +640,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -640,7 +640,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* unless re_id->qp is a valid pointer. * unless re_id->qp is a valid pointer.
*/ */
bad_wr = NULL; bad_wr = NULL;
rc = ib_post_send(r_xprt->rx_ep.re_id->qp, first, &bad_wr); rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
if (!rc) if (!rc)
return; return;
......
...@@ -131,9 +131,10 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, ...@@ -131,9 +131,10 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
struct rpc_rqst *rqst) struct rpc_rqst *rqst)
{ {
struct xdr_buf *xdr = &rqst->rq_snd_buf; struct xdr_buf *xdr = &rqst->rq_snd_buf;
struct rpcrdma_ep *ep = r_xprt->rx_ep;
unsigned int count, remaining, offset; unsigned int count, remaining, offset;
if (xdr->len > r_xprt->rx_ep.re_max_inline_send) if (xdr->len > ep->re_max_inline_send)
return false; return false;
if (xdr->page_len) { if (xdr->page_len) {
...@@ -144,7 +145,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, ...@@ -144,7 +145,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
remaining -= min_t(unsigned int, remaining -= min_t(unsigned int,
PAGE_SIZE - offset, remaining); PAGE_SIZE - offset, remaining);
offset = 0; offset = 0;
if (++count > r_xprt->rx_ep.re_attr.cap.max_send_sge) if (++count > ep->re_attr.cap.max_send_sge)
return false; return false;
} }
} }
...@@ -161,7 +162,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, ...@@ -161,7 +162,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt, static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
struct rpc_rqst *rqst) struct rpc_rqst *rqst)
{ {
return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.re_max_inline_recv; return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
} }
/* The client is required to provide a Reply chunk if the maximum /* The client is required to provide a Reply chunk if the maximum
...@@ -175,7 +176,7 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt, ...@@ -175,7 +176,7 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
const struct xdr_buf *buf = &rqst->rq_rcv_buf; const struct xdr_buf *buf = &rqst->rq_rcv_buf;
return (buf->head[0].iov_len + buf->tail[0].iov_len) < return (buf->head[0].iov_len + buf->tail[0].iov_len) <
r_xprt->rx_ep.re_max_inline_recv; r_xprt->rx_ep->re_max_inline_recv;
} }
/* Split @vec on page boundaries into SGEs. FMR registers pages, not /* Split @vec on page boundaries into SGEs. FMR registers pages, not
...@@ -254,7 +255,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, ...@@ -254,7 +255,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
/* When encoding a Read chunk, the tail iovec contains an /* When encoding a Read chunk, the tail iovec contains an
* XDR pad and may be omitted. * XDR pad and may be omitted.
*/ */
if (type == rpcrdma_readch && r_xprt->rx_ep.re_implicit_roundup) if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup)
goto out; goto out;
/* When encoding a Write chunk, some servers need to see an /* When encoding a Write chunk, some servers need to see an
...@@ -262,7 +263,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, ...@@ -262,7 +263,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
* layer provides space in the tail iovec that may be used * layer provides space in the tail iovec that may be used
* for this purpose. * for this purpose.
*/ */
if (type == rpcrdma_writech && r_xprt->rx_ep.re_implicit_roundup) if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
goto out; goto out;
if (xdrbuf->tail[0].iov_len) if (xdrbuf->tail[0].iov_len)
...@@ -1475,8 +1476,8 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) ...@@ -1475,8 +1476,8 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
if (credits == 0) if (credits == 0)
credits = 1; /* don't deadlock */ credits = 1; /* don't deadlock */
else if (credits > r_xprt->rx_ep.re_max_requests) else if (credits > r_xprt->rx_ep->re_max_requests)
credits = r_xprt->rx_ep.re_max_requests; credits = r_xprt->rx_ep->re_max_requests;
if (buf->rb_credits != credits) if (buf->rb_credits != credits)
rpcrdma_update_cwnd(r_xprt, credits); rpcrdma_update_cwnd(r_xprt, credits);
rpcrdma_post_recvs(r_xprt, false); rpcrdma_post_recvs(r_xprt, false);
......
...@@ -238,12 +238,12 @@ xprt_rdma_connect_worker(struct work_struct *work) ...@@ -238,12 +238,12 @@ xprt_rdma_connect_worker(struct work_struct *work)
struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt, struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
rx_connect_worker.work); rx_connect_worker.work);
struct rpc_xprt *xprt = &r_xprt->rx_xprt; struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
int rc; int rc;
rc = rpcrdma_xprt_connect(r_xprt); rc = rpcrdma_xprt_connect(r_xprt);
xprt_clear_connecting(xprt); xprt_clear_connecting(xprt);
if (ep->re_connect_status > 0) { if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) {
xprt->connect_cookie++;
xprt->stat.connect_count++; xprt->stat.connect_count++;
xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_time += (long)jiffies -
xprt->stat.connect_start; xprt->stat.connect_start;
...@@ -266,7 +266,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt) ...@@ -266,7 +266,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_op_inject_dsc(r_xprt); trace_xprtrdma_op_inject_dsc(r_xprt);
rdma_disconnect(r_xprt->rx_ep.re_id); rdma_disconnect(r_xprt->rx_ep->re_id);
} }
/** /**
...@@ -316,10 +316,15 @@ xprt_setup_rdma(struct xprt_create *args) ...@@ -316,10 +316,15 @@ xprt_setup_rdma(struct xprt_create *args)
if (args->addrlen > sizeof(xprt->addr)) if (args->addrlen > sizeof(xprt->addr))
return ERR_PTR(-EBADF); return ERR_PTR(-EBADF);
if (!try_module_get(THIS_MODULE))
return ERR_PTR(-EIO);
xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
xprt_rdma_slot_table_entries); xprt_rdma_slot_table_entries);
if (!xprt) if (!xprt) {
module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
xprt->timeout = &xprt_rdma_default_timeout; xprt->timeout = &xprt_rdma_default_timeout;
xprt->connect_timeout = xprt->timeout->to_initval; xprt->connect_timeout = xprt->timeout->to_initval;
...@@ -348,11 +353,12 @@ xprt_setup_rdma(struct xprt_create *args) ...@@ -348,11 +353,12 @@ xprt_setup_rdma(struct xprt_create *args)
new_xprt = rpcx_to_rdmax(xprt); new_xprt = rpcx_to_rdmax(xprt);
rc = rpcrdma_buffer_create(new_xprt); rc = rpcrdma_buffer_create(new_xprt);
if (rc) if (rc) {
goto out2; xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
if (!try_module_get(THIS_MODULE)) module_put(THIS_MODULE);
goto out4; return ERR_PTR(rc);
}
INIT_DELAYED_WORK(&new_xprt->rx_connect_worker, INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
xprt_rdma_connect_worker); xprt_rdma_connect_worker);
...@@ -364,15 +370,6 @@ xprt_setup_rdma(struct xprt_create *args) ...@@ -364,15 +370,6 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->address_strings[RPC_DISPLAY_PORT]); xprt->address_strings[RPC_DISPLAY_PORT]);
trace_xprtrdma_create(new_xprt); trace_xprtrdma_create(new_xprt);
return xprt; return xprt;
out4:
rpcrdma_buffer_destroy(&new_xprt->rx_buf);
rc = -ENODEV;
out2:
trace_xprtrdma_op_destroy(new_xprt);
xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
return ERR_PTR(rc);
} }
/** /**
...@@ -491,11 +488,11 @@ static void ...@@ -491,11 +488,11 @@ static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{ {
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_ep *ep = &r_xprt->rx_ep; struct rpcrdma_ep *ep = r_xprt->rx_ep;
unsigned long delay; unsigned long delay;
delay = 0; delay = 0;
if (ep->re_connect_status != 0) { if (ep && ep->re_connect_status != 0) {
delay = xprt_reconnect_delay(xprt); delay = xprt_reconnect_delay(xprt);
xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO); xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
} }
......
This diff is collapsed.
...@@ -68,6 +68,7 @@ ...@@ -68,6 +68,7 @@
* RDMA Endpoint -- connection endpoint details * RDMA Endpoint -- connection endpoint details
*/ */
struct rpcrdma_ep { struct rpcrdma_ep {
struct kref re_kref;
struct rdma_cm_id *re_id; struct rdma_cm_id *re_id;
struct ib_pd *re_pd; struct ib_pd *re_pd;
unsigned int re_max_rdma_segs; unsigned int re_max_rdma_segs;
...@@ -75,7 +76,6 @@ struct rpcrdma_ep { ...@@ -75,7 +76,6 @@ struct rpcrdma_ep {
bool re_implicit_roundup; bool re_implicit_roundup;
enum ib_mr_type re_mrtype; enum ib_mr_type re_mrtype;
struct completion re_done; struct completion re_done;
struct completion re_remove_done;
unsigned int re_send_count; unsigned int re_send_count;
unsigned int re_send_batch; unsigned int re_send_batch;
unsigned int re_max_inline_send; unsigned int re_max_inline_send;
...@@ -84,6 +84,7 @@ struct rpcrdma_ep { ...@@ -84,6 +84,7 @@ struct rpcrdma_ep {
int re_connect_status; int re_connect_status;
struct ib_qp_init_attr re_attr; struct ib_qp_init_attr re_attr;
wait_queue_head_t re_connect_wait; wait_queue_head_t re_connect_wait;
struct rpc_xprt *re_xprt;
struct rpcrdma_connect_private struct rpcrdma_connect_private
re_cm_private; re_cm_private;
struct rdma_conn_param re_remote_cma; struct rdma_conn_param re_remote_cma;
...@@ -411,7 +412,7 @@ struct rpcrdma_stats { ...@@ -411,7 +412,7 @@ struct rpcrdma_stats {
*/ */
struct rpcrdma_xprt { struct rpcrdma_xprt {
struct rpc_xprt rx_xprt; struct rpc_xprt rx_xprt;
struct rpcrdma_ep rx_ep; struct rpcrdma_ep *rx_ep;
struct rpcrdma_buffer rx_buf; struct rpcrdma_buffer rx_buf;
struct delayed_work rx_connect_worker; struct delayed_work rx_connect_worker;
struct rpc_timeout rx_timeout; struct rpc_timeout rx_timeout;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment