Commit 395069fc authored by Chuck Lever's avatar Chuck Lever Committed by Anna Schumaker

xprtrdma: Add trace points for calls to transport switch methods

Name them "trace_xprtrdma_op_*" so they can be easily enabled as a
group. No trace point is added where the generic layer already has
observability.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent ba217ec6
...@@ -381,11 +381,13 @@ TRACE_EVENT(xprtrdma_disconnect, ...@@ -381,11 +381,13 @@ TRACE_EVENT(xprtrdma_disconnect,
DEFINE_RXPRT_EVENT(xprtrdma_conn_start); DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout); DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create); DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_destroy); DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove); DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert); DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect); DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc); DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
TRACE_EVENT(xprtrdma_qp_event, TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO( TP_PROTO(
...@@ -834,7 +836,7 @@ TRACE_EVENT(xprtrdma_decode_seg, ...@@ -834,7 +836,7 @@ TRACE_EVENT(xprtrdma_decode_seg,
** Allocation/release of rpcrdma_reqs and rpcrdma_reps ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
**/ **/
TRACE_EVENT(xprtrdma_allocate, TRACE_EVENT(xprtrdma_op_allocate,
TP_PROTO( TP_PROTO(
const struct rpc_task *task, const struct rpc_task *task,
const struct rpcrdma_req *req const struct rpcrdma_req *req
...@@ -864,7 +866,7 @@ TRACE_EVENT(xprtrdma_allocate, ...@@ -864,7 +866,7 @@ TRACE_EVENT(xprtrdma_allocate,
) )
); );
TRACE_EVENT(xprtrdma_rpc_done, TRACE_EVENT(xprtrdma_op_free,
TP_PROTO( TP_PROTO(
const struct rpc_task *task, const struct rpc_task *task,
const struct rpcrdma_req *req const struct rpcrdma_req *req
......
...@@ -268,7 +268,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt) ...@@ -268,7 +268,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{ {
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_inject_dsc(r_xprt); trace_xprtrdma_op_inject_dsc(r_xprt);
rdma_disconnect(r_xprt->rx_ia.ri_id); rdma_disconnect(r_xprt->rx_ia.ri_id);
} }
...@@ -284,7 +284,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) ...@@ -284,7 +284,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
{ {
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_destroy(r_xprt); trace_xprtrdma_op_destroy(r_xprt);
cancel_delayed_work_sync(&r_xprt->rx_connect_worker); cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
...@@ -418,7 +418,7 @@ xprt_setup_rdma(struct xprt_create *args) ...@@ -418,7 +418,7 @@ xprt_setup_rdma(struct xprt_create *args)
out2: out2:
rpcrdma_ia_close(&new_xprt->rx_ia); rpcrdma_ia_close(&new_xprt->rx_ia);
out1: out1:
trace_xprtrdma_destroy(new_xprt); trace_xprtrdma_op_destroy(new_xprt);
xprt_rdma_free_addresses(xprt); xprt_rdma_free_addresses(xprt);
xprt_free(xprt); xprt_free(xprt);
return ERR_PTR(rc); return ERR_PTR(rc);
...@@ -428,7 +428,8 @@ xprt_setup_rdma(struct xprt_create *args) ...@@ -428,7 +428,8 @@ xprt_setup_rdma(struct xprt_create *args)
* xprt_rdma_close - close a transport connection * xprt_rdma_close - close a transport connection
* @xprt: transport context * @xprt: transport context
* *
* Called during transport shutdown, reconnect, or device removal. * Called during autoclose or device removal.
*
* Caller holds @xprt's send lock to prevent activity on this * Caller holds @xprt's send lock to prevent activity on this
* transport while the connection is torn down. * transport while the connection is torn down.
*/ */
...@@ -440,6 +441,8 @@ void xprt_rdma_close(struct rpc_xprt *xprt) ...@@ -440,6 +441,8 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
might_sleep(); might_sleep();
trace_xprtrdma_op_close(r_xprt);
/* Prevent marshaling and sending of new requests */ /* Prevent marshaling and sending of new requests */
xprt_clear_connected(xprt); xprt_clear_connected(xprt);
...@@ -525,6 +528,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) ...@@ -525,6 +528,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{ {
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_op_connect(r_xprt);
if (r_xprt->rx_ep.rep_connected != 0) { if (r_xprt->rx_ep.rep_connected != 0) {
/* Reconnect */ /* Reconnect */
schedule_delayed_work(&r_xprt->rx_connect_worker, schedule_delayed_work(&r_xprt->rx_connect_worker,
...@@ -659,11 +663,11 @@ xprt_rdma_allocate(struct rpc_task *task) ...@@ -659,11 +663,11 @@ xprt_rdma_allocate(struct rpc_task *task)
rqst->rq_buffer = req->rl_sendbuf->rg_base; rqst->rq_buffer = req->rl_sendbuf->rg_base;
rqst->rq_rbuffer = req->rl_recvbuf->rg_base; rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
trace_xprtrdma_allocate(task, req); trace_xprtrdma_op_allocate(task, req);
return 0; return 0;
out_fail: out_fail:
trace_xprtrdma_allocate(task, NULL); trace_xprtrdma_op_allocate(task, NULL);
return -ENOMEM; return -ENOMEM;
} }
...@@ -682,7 +686,7 @@ xprt_rdma_free(struct rpc_task *task) ...@@ -682,7 +686,7 @@ xprt_rdma_free(struct rpc_task *task)
if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags)) if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
rpcrdma_release_rqst(r_xprt, req); rpcrdma_release_rqst(r_xprt, req);
trace_xprtrdma_rpc_done(task, req); trace_xprtrdma_op_free(task, req);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment