Commit a25e21f0 authored by David Howells

rxrpc, afs: Use debug_ids rather than pointers in traces

In rxrpc and afs, use the debug_ids that are monotonically assigned to
various objects as they're allocated, rather than pointers, since kernel
pointers are now hashed when printed and are therefore less useful.
Further, the debug IDs aren't reused anywhere near as quickly.

In addition, allow kernel services that use rxrpc, such as afs, to take
numbers from the rxrpc counter, assign them to their own call struct and
pass them in to rxrpc for both client and service calls so that the trace
lines for each will have the same ID tag.
Signed-off-by: David Howells <dhowells@redhat.com>
parent 827efed6
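
The API change this commit describes can be seen in the afs hunks below: the kernel service draws an ID from rxrpc's counter when it allocates its own call record and then hands the same value to rxrpc. A minimal sketch of that pattern, loosely modelled on afs_alloc_call()/afs_make_call(); struct my_call and my_begin_call() are hypothetical names used only for illustration, not part of the patch:

/* Hypothetical sketch: only rxrpc_debug_id and the new debug_id parameter of
 * rxrpc_kernel_begin_call() come from this patch; everything named my_* is
 * illustrative.
 */
#include <linux/atomic.h>
#include <net/af_rxrpc.h>

struct my_call {
	struct rxrpc_call *rxcall;
	unsigned int debug_id;		/* trace tag shared with rxrpc */
};

static struct rxrpc_call *my_begin_call(struct socket *sock,
					struct sockaddr_rxrpc *srx,
					struct key *key,
					struct my_call *call,
					rxrpc_notify_rx_t notify_rx)
{
	/* Take the ID once, at allocation time, so every trace line for this
	 * call carries the same tag in both the service and rxrpc.
	 */
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	return rxrpc_kernel_begin_call(sock, srx, key,
				       (unsigned long)call,	/* user call ID */
				       -1,			/* tx_total_len not known yet */
				       GFP_KERNEL,
				       notify_rx,
				       false,			/* no service upgrade */
				       call->debug_id);
}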
@@ -118,6 +118,7 @@ struct afs_call {
 	bool ret_reply0;	/* T if should return reply[0] on success */
 	bool upgrade;		/* T to request service upgrade */
 	u16 service_id;		/* Actual service ID (after upgrade) */
+	unsigned int debug_id;	/* Trace ID */
 	u32 operation_ID;	/* operation ID for an incoming call */
 	u32 count;		/* count for use in unmarshalling */
 	__be32 tmp;		/* place to extract temporary data */
......
@@ -131,6 +131,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
 	call->type = type;
 	call->net = net;
+	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
 	atomic_set(&call->usage, 1);
 	INIT_WORK(&call->async_work, afs_process_async_call);
 	init_waitqueue_head(&call->waitq);
@@ -169,11 +170,12 @@ void afs_put_call(struct afs_call *call)
 		afs_put_server(call->net, call->cm_server);
 		afs_put_cb_interest(call->net, call->cbi);
 		kfree(call->request);
-		kfree(call);
 
-		o = atomic_dec_return(&net->nr_outstanding_calls);
 		trace_afs_call(call, afs_call_trace_free, 0, o,
 			       __builtin_return_address(0));
+		kfree(call);
+
+		o = atomic_dec_return(&net->nr_outstanding_calls);
 		if (o == 0)
 			wake_up_atomic_t(&net->nr_outstanding_calls);
 	}
@@ -378,7 +380,8 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
 					 (async ?
 					  afs_wake_up_async_call :
 					  afs_wake_up_call_waiter),
-					 call->upgrade);
+					 call->upgrade,
+					 call->debug_id);
 	if (IS_ERR(rxcall)) {
 		ret = PTR_ERR(rxcall);
 		goto error_kill_call;
@@ -727,7 +730,8 @@ void afs_charge_preallocation(struct work_struct *work)
 					       afs_wake_up_async_call,
 					       afs_rx_attach,
 					       (unsigned long)call,
-					       GFP_KERNEL) < 0)
+					       GFP_KERNEL,
+					       call->debug_id) < 0)
 			break;
 		call = NULL;
 	}
......
@@ -31,6 +31,11 @@ enum rxrpc_call_completion {
 	NR__RXRPC_CALL_COMPLETIONS
 };
 
+/*
+ * Debug ID counter for tracing.
+ */
+extern atomic_t rxrpc_debug_id;
+
 typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
 				  unsigned long);
 typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
@@ -50,7 +55,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
 					   s64,
 					   gfp_t,
 					   rxrpc_notify_rx_t,
-					   bool);
+					   bool,
+					   unsigned int);
 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
 			   struct msghdr *, size_t,
 			   rxrpc_notify_end_tx_t);
@@ -63,7 +69,8 @@ void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
 			   struct sockaddr_rxrpc *);
 u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
-			       rxrpc_user_attach_call_t, unsigned long, gfp_t);
+			       rxrpc_user_attach_call_t, unsigned long, gfp_t,
+			       unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
 int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
 			    struct sockaddr_rxrpc *, struct key *);
......
@@ -133,8 +133,7 @@ TRACE_EVENT(afs_recv_data,
 	    TP_ARGS(call, count, offset, want_more, ret),
 
 	    TP_STRUCT__entry(
-		    __field(struct rxrpc_call *,	rxcall)
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(enum afs_call_state,	state)
 		    __field(unsigned int,		count)
 		    __field(unsigned int,		offset)
@@ -144,8 +143,7 @@ TRACE_EVENT(afs_recv_data,
 		    ),
 
 	    TP_fast_assign(
-		    __entry->rxcall	= call->rxcall;
-		    __entry->call	= call;
+		    __entry->call	= call->debug_id;
 		    __entry->state	= call->state;
 		    __entry->unmarshall	= call->unmarshall;
 		    __entry->count	= count;
@@ -154,8 +152,7 @@ TRACE_EVENT(afs_recv_data,
 		    __entry->ret	= ret;
 		    ),
 
-	    TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d",
-		      __entry->rxcall,
+	    TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d",
 		      __entry->call,
 		      __entry->state, __entry->unmarshall,
 		      __entry->offset, __entry->count,
@@ -168,21 +165,18 @@ TRACE_EVENT(afs_notify_call,
 	    TP_ARGS(rxcall, call),
 
 	    TP_STRUCT__entry(
-		    __field(struct rxrpc_call *,	rxcall)
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(enum afs_call_state,	state)
 		    __field(unsigned short,		unmarshall)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->rxcall	= rxcall;
-		    __entry->call	= call;
+		    __entry->call	= call->debug_id;
 		    __entry->state	= call->state;
 		    __entry->unmarshall	= call->unmarshall;
 		    ),
 
-	    TP_printk("c=%p ac=%p s=%u u=%u",
-		      __entry->rxcall,
+	    TP_printk("c=%08x s=%u u=%u",
 		      __entry->call,
 		      __entry->state, __entry->unmarshall)
 	    );
@@ -193,21 +187,18 @@ TRACE_EVENT(afs_cb_call,
 	    TP_ARGS(call),
 
 	    TP_STRUCT__entry(
-		    __field(struct rxrpc_call *,	rxcall)
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(const char *,		name)
 		    __field(u32,			op)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->rxcall	= call->rxcall;
-		    __entry->call	= call;
+		    __entry->call	= call->debug_id;
 		    __entry->name	= call->type->name;
 		    __entry->op		= call->operation_ID;
 		    ),
 
-	    TP_printk("c=%p ac=%p %s o=%u",
-		      __entry->rxcall,
+	    TP_printk("c=%08x %s o=%u",
 		      __entry->call,
 		      __entry->name,
 		      __entry->op)
@@ -220,7 +211,7 @@ TRACE_EVENT(afs_call,
 	    TP_ARGS(call, op, usage, outstanding, where),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(int,			op)
 		    __field(int,			usage)
 		    __field(int,			outstanding)
@@ -228,14 +219,14 @@ TRACE_EVENT(afs_call,
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->outstanding = outstanding;
 		    __entry->where = where;
 		    ),
 
-	    TP_printk("c=%p %s u=%d o=%d sp=%pSR",
+	    TP_printk("c=%08x %s u=%d o=%d sp=%pSR",
 		      __entry->call,
 		      __print_symbolic(__entry->op, afs_call_traces),
 		      __entry->usage,
@@ -249,13 +240,13 @@ TRACE_EVENT(afs_make_fs_call,
 	    TP_ARGS(call, fid),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(enum afs_fs_operation,	op)
 		    __field_struct(struct afs_fid,	fid)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->op = call->operation_ID;
 		    if (fid) {
 			    __entry->fid = *fid;
@@ -266,7 +257,7 @@ TRACE_EVENT(afs_make_fs_call,
 		    }
 		    ),
 
-	    TP_printk("c=%p %06x:%06x:%06x %s",
+	    TP_printk("c=%08x %06x:%06x:%06x %s",
 		      __entry->call,
 		      __entry->fid.vid,
 		      __entry->fid.vnode,
@@ -280,16 +271,16 @@ TRACE_EVENT(afs_make_vl_call,
 	    TP_ARGS(call),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(enum afs_vl_operation,	op)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->op = call->operation_ID;
 		    ),
 
-	    TP_printk("c=%p %s",
+	    TP_printk("c=%08x %s",
 		      __entry->call,
 		      __print_symbolic(__entry->op, afs_vl_operations))
 	    );
@@ -300,20 +291,20 @@ TRACE_EVENT(afs_call_done,
 	    TP_ARGS(call),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(struct rxrpc_call *,	rx_call)
 		    __field(int,			ret)
 		    __field(u32,			abort_code)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->rx_call = call->rxcall;
 		    __entry->ret = call->error;
 		    __entry->abort_code = call->abort_code;
 		    ),
 
-	    TP_printk(" c=%p ret=%d ab=%d [%p]",
+	    TP_printk(" c=%08x ret=%d ab=%d [%p]",
 		      __entry->call,
 		      __entry->ret,
 		      __entry->abort_code,
@@ -327,7 +318,7 @@ TRACE_EVENT(afs_send_pages,
 	    TP_ARGS(call, msg, first, last, offset),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(pgoff_t,			first)
 		    __field(pgoff_t,			last)
 		    __field(unsigned int,		nr)
@@ -337,7 +328,7 @@ TRACE_EVENT(afs_send_pages,
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->first = first;
 		    __entry->last = last;
 		    __entry->nr = msg->msg_iter.nr_segs;
@@ -346,7 +337,7 @@ TRACE_EVENT(afs_send_pages,
 		    __entry->flags = msg->msg_flags;
 		    ),
 
-	    TP_printk(" c=%p %lx-%lx-%lx b=%x o=%x f=%x",
+	    TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x",
 		      __entry->call,
 		      __entry->first, __entry->first + __entry->nr - 1, __entry->last,
 		      __entry->bytes, __entry->offset,
@@ -360,7 +351,7 @@ TRACE_EVENT(afs_sent_pages,
 	    TP_ARGS(call, first, last, cursor, ret),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(pgoff_t,			first)
 		    __field(pgoff_t,			last)
 		    __field(pgoff_t,			cursor)
@@ -368,14 +359,14 @@ TRACE_EVENT(afs_sent_pages,
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->first = first;
 		    __entry->last = last;
 		    __entry->cursor = cursor;
 		    __entry->ret = ret;
 		    ),
 
-	    TP_printk(" c=%p %lx-%lx c=%lx r=%d",
+	    TP_printk(" c=%08x %lx-%lx c=%lx r=%d",
 		      __entry->call,
 		      __entry->first, __entry->last,
 		      __entry->cursor, __entry->ret)
@@ -450,7 +441,7 @@ TRACE_EVENT(afs_call_state,
 	    TP_ARGS(call, from, to, ret, remote_abort),
 
 	    TP_STRUCT__entry(
-		    __field(struct afs_call *,		call)
+		    __field(unsigned int,		call)
 		    __field(enum afs_call_state,	from)
 		    __field(enum afs_call_state,	to)
 		    __field(int,			ret)
@@ -458,14 +449,14 @@ TRACE_EVENT(afs_call_state,
 		    ),
 
 	    TP_fast_assign(
-		    __entry->call = call;
+		    __entry->call = call->debug_id;
 		    __entry->from = from;
 		    __entry->to = to;
 		    __entry->ret = ret;
 		    __entry->abort = remote_abort;
 		    ),
 
-	    TP_printk("c=%p %u->%u r=%d ab=%d",
+	    TP_printk("c=%08x %u->%u r=%d ab=%d",
 		      __entry->call,
 		      __entry->from, __entry->to,
 		      __entry->ret, __entry->abort)
......
This diff is collapsed.
@@ -40,6 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
 /* current debugging ID */
 atomic_t rxrpc_debug_id;
+EXPORT_SYMBOL(rxrpc_debug_id);
 
 /* count of skbs currently in use */
 atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
@@ -267,6 +268,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
  * @gfp: The allocation constraints
  * @notify_rx: Where to send notifications instead of socket queue
  * @upgrade: Request service upgrade for call
+ * @debug_id: The debug ID for tracing to be assigned to the call
  *
  * Allow a kernel service to begin a call on the nominated socket. This just
  * sets up all the internal tracking structures and allocates connection and
@@ -282,7 +284,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 					   s64 tx_total_len,
 					   gfp_t gfp,
 					   rxrpc_notify_rx_t notify_rx,
-					   bool upgrade)
+					   bool upgrade,
+					   unsigned int debug_id)
 {
 	struct rxrpc_conn_parameters cp;
 	struct rxrpc_call_params p;
@@ -314,7 +317,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	cp.exclusive = false;
 	cp.upgrade = upgrade;
 	cp.service_id = srx->srx_service;
-	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
+	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
 	/* The socket has been unlocked. */
 	if (!IS_ERR(call)) {
 		call->notify_rx = notify_rx;
......
@@ -691,7 +691,6 @@ struct rxrpc_send_params {
  * af_rxrpc.c
  */
 extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
-extern atomic_t rxrpc_debug_id;
 extern struct workqueue_struct *rxrpc_workqueue;
 
 /*
@@ -732,11 +731,12 @@ extern unsigned int rxrpc_max_call_lifetime;
 extern struct kmem_cache *rxrpc_call_jar;
 
 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
 					 struct rxrpc_conn_parameters *,
 					 struct sockaddr_rxrpc *,
-					 struct rxrpc_call_params *, gfp_t);
+					 struct rxrpc_call_params *, gfp_t,
+					 unsigned int);
 int rxrpc_retry_client_call(struct rxrpc_sock *,
 			    struct rxrpc_call *,
 			    struct rxrpc_conn_parameters *,
@@ -822,7 +822,7 @@ static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
 					      rxrpc_seq_t seq,
 					      u32 abort_code, int error)
 {
-	trace_rxrpc_abort(why, call->cid, call->call_id, seq,
+	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
 			  abort_code, error);
 	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
 					   abort_code, error);
......
@@ -34,7 +34,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 				      struct rxrpc_backlog *b,
 				      rxrpc_notify_rx_t notify_rx,
 				      rxrpc_user_attach_call_t user_attach_call,
-				      unsigned long user_call_ID, gfp_t gfp)
+				      unsigned long user_call_ID, gfp_t gfp,
+				      unsigned int debug_id)
 {
 	const void *here = __builtin_return_address(0);
 	struct rxrpc_call *call;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	/* Now it gets complicated, because calls get registered with the
 	 * socket here, particularly if a user ID is preassigned by the user.
 	 */
-	call = rxrpc_alloc_call(rx, gfp);
+	call = rxrpc_alloc_call(rx, gfp, debug_id);
 	if (!call)
 		return -ENOMEM;
 	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
@@ -174,7 +175,8 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 	if (rx->discard_new_call)
 		return 0;
 
-	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
+	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
+					  atomic_inc_return(&rxrpc_debug_id)) == 0)
 		;
 
 	return 0;
@@ -347,7 +349,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 		   service_id == rx->second_service))
 		goto found_service;
 
-	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RX_INVALID_OPERATION, EOPNOTSUPP);
 	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
 	skb->priority = RX_INVALID_OPERATION;
@@ -358,7 +360,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 	spin_lock(&rx->incoming_lock);
 	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
 	    rx->sk.sk_state == RXRPC_CLOSE) {
-		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
+		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
 				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
 		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
 		skb->priority = RX_INVALID_OPERATION;
@@ -635,6 +637,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
  * @user_attach_call: Func to attach call to user_call_ID
  * @user_call_ID: The tag to attach to the preallocated call
  * @gfp: The allocation conditions.
+ * @debug_id: The tracing debug ID.
  *
  * Charge up the socket with preallocated calls, each with a user ID. A
  * function should be provided to effect the attachment from the user's side.
@@ -645,7 +648,8 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
 int rxrpc_kernel_charge_accept(struct socket *sock,
 			       rxrpc_notify_rx_t notify_rx,
 			       rxrpc_user_attach_call_t user_attach_call,
-			       unsigned long user_call_ID, gfp_t gfp)
+			       unsigned long user_call_ID, gfp_t gfp,
+			       unsigned int debug_id)
 {
 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
 	struct rxrpc_backlog *b = rx->backlog;
@@ -655,6 +659,6 @@ int rxrpc_kernel_charge_accept(struct socket *sock,
 	return rxrpc_service_prealloc_one(rx, b, notify_rx,
 					  user_attach_call, user_call_ID,
-					  gfp);
+					  gfp, debug_id);
 }
 EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
@@ -99,7 +99,8 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
 /*
  * allocate a new call
  */
-struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
+				    unsigned int debug_id)
 {
 	struct rxrpc_call *call;
@@ -138,7 +139,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
 	spin_lock_init(&call->notify_lock);
 	rwlock_init(&call->state_lock);
 	atomic_set(&call->usage, 1);
-	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+	call->debug_id = debug_id;
 	call->tx_total_len = -1;
 	call->next_rx_timo = 20 * HZ;
 	call->next_req_timo = 1 * HZ;
@@ -166,14 +167,15 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
  */
 static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
 						  struct sockaddr_rxrpc *srx,
-						  gfp_t gfp)
+						  gfp_t gfp,
+						  unsigned int debug_id)
 {
 	struct rxrpc_call *call;
 	ktime_t now;
 
 	_enter("");
 
-	call = rxrpc_alloc_call(rx, gfp);
+	call = rxrpc_alloc_call(rx, gfp, debug_id);
 	if (!call)
 		return ERR_PTR(-ENOMEM);
 	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -214,7 +216,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 					 struct rxrpc_conn_parameters *cp,
 					 struct sockaddr_rxrpc *srx,
 					 struct rxrpc_call_params *p,
-					 gfp_t gfp)
+					 gfp_t gfp,
+					 unsigned int debug_id)
 	__releases(&rx->sk.sk_lock.slock)
 {
 	struct rxrpc_call *call, *xcall;
@@ -225,7 +228,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	_enter("%p,%lx", rx, p->user_call_ID);
 
-	call = rxrpc_alloc_client_call(rx, srx, gfp);
+	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
 	if (IS_ERR(call)) {
 		release_sock(&rx->sk);
 		_leave(" = %ld", PTR_ERR(call));
......
@@ -160,7 +160,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
 					  lockdep_is_held(&conn->channel_lock));
 		if (call) {
 			if (compl == RXRPC_CALL_LOCALLY_ABORTED)
-				trace_rxrpc_abort("CON", call->cid,
+				trace_rxrpc_abort(call->debug_id,
+						  "CON", call->cid,
 						  call->call_id, 0,
 						  abort_code, error);
 			if (rxrpc_set_call_completion(call, compl,
......
@@ -1307,21 +1307,21 @@ void rxrpc_data_ready(struct sock *udp_sk)
 wrong_security:
 	rcu_read_unlock();
-	trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RXKADINCONSISTENCY, EBADMSG);
 	skb->priority = RXKADINCONSISTENCY;
 	goto post_abort;
 
 reupgrade:
 	rcu_read_unlock();
-	trace_rxrpc_abort("UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RX_PROTOCOL_ERROR, EBADMSG);
 	goto protocol_error;
 
 bad_message_unlock:
 	rcu_read_unlock();
 bad_message:
-	trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RX_PROTOCOL_ERROR, EBADMSG);
 
 protocol_error:
 	skb->priority = RX_PROTOCOL_ERROR;
......
@@ -579,7 +579,8 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 	cp.exclusive = rx->exclusive | p->exclusive;
 	cp.upgrade = p->upgrade;
 	cp.service_id = srx->srx_service;
-	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
+	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
+				     atomic_inc_return(&rxrpc_debug_id));
 	/* The socket is now unlocked */
 
 	_leave(" = %p\n", call);
......
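
For service-side calls the same tag is handed over when the socket is charged with preallocated calls, as afs_charge_preallocation() does above via the new rxrpc_kernel_charge_accept() argument. A hedged sketch of that half of the pattern, reusing the hypothetical struct my_call from the earlier example (my_charge_one(), my_wake_up and my_attach are likewise illustrative names, not part of the patch):

/* Hypothetical sketch: only rxrpc_kernel_charge_accept()'s new debug_id
 * argument comes from this patch.
 */
static int my_charge_one(struct socket *sock, struct my_call *call,
			 rxrpc_notify_rx_t my_wake_up,
			 rxrpc_user_attach_call_t my_attach)
{
	/* Assign the trace tag before the call is attached so the service
	 * call and the rxrpc call it ends up bound to share it.
	 */
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	return rxrpc_kernel_charge_accept(sock, my_wake_up, my_attach,
					  (unsigned long)call, GFP_KERNEL,
					  call->debug_id);
}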