tracing: Update stack trace skipping for ORC unwinder

With the addition of ORC unwinder and FRAME POINTER unwinder, the stack
trace skipping requirements have changed.

I went through the tracing stack trace dumps with ORC and with frame
pointers and recalculated the proper values.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 6be7fa3c
...@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) ...@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
} }
EXPORT_SYMBOL_GPL(trace_event_buffer_commit); EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
/*
* Skip 3:
*
* trace_buffer_unlock_commit_regs()
* trace_event_buffer_commit()
* trace_event_raw_event_xxx()
*/
# define STACK_SKIP 3
void trace_buffer_unlock_commit_regs(struct trace_array *tr, void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct ring_buffer *buffer, struct ring_buffer *buffer,
struct ring_buffer_event *event, struct ring_buffer_event *event,
...@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, ...@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
__buffer_unlock_commit(buffer, event); __buffer_unlock_commit(buffer, event);
/* /*
* If regs is not set, then skip the following callers: * If regs is not set, then skip the necessary functions.
* trace_buffer_unlock_commit_regs
* event_trigger_unlock_commit
* trace_event_buffer_commit
* trace_event_raw_event_sched_switch
* Note, we can still get here via blktrace, wakeup tracer * Note, we can still get here via blktrace, wakeup tracer
* and mmiotrace, but that's ok if they lose a function or * and mmiotrace, but that's ok if they lose a function or
* two. They are that meaningful. * two. They are not that meaningful.
*/ */
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs); ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
ftrace_trace_userstack(buffer, flags, pc); ftrace_trace_userstack(buffer, flags, pc);
} }
...@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, ...@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
trace.skip = skip; trace.skip = skip;
/* /*
* Add two, for this function and the call to save_stack_trace() * Add one, for this function and the call to save_stack_trace()
* If regs is set, then these functions will not be in the way. * If regs is set, then these functions will not be in the way.
*/ */
#ifndef CONFIG_UNWINDER_ORC
if (!regs) if (!regs)
trace.skip += 2; trace.skip++;
#endif
/* /*
* Since events can happen in NMIs there's no safe way to * Since events can happen in NMIs there's no safe way to
...@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip) ...@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
local_save_flags(flags); local_save_flags(flags);
/* #ifndef CONFIG_UNWINDER_ORC
* Skip 3 more, seems to get us at the caller of /* Skip 1 to skip this function. */
* this function. skip++;
*/ #endif
skip += 3;
__ftrace_trace_stack(global_trace.trace_buffer.buffer, __ftrace_trace_stack(global_trace.trace_buffer.buffer,
flags, skip, preempt_count(), NULL); flags, skip, preempt_count(), NULL);
} }
......
...@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; } ...@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */ #endif /* CONFIG_TRACER_SNAPSHOT */
#ifdef CONFIG_STACKTRACE #ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
* event_triggers_post_call()
* trace_event_raw_event_xxx()
*/
# define STACK_SKIP 2
#else
/* /*
* Skip 3: * Skip 4:
* stacktrace_trigger() * stacktrace_trigger()
* event_triggers_post_call() * event_triggers_post_call()
* trace_event_buffer_commit()
* trace_event_raw_event_xxx() * trace_event_raw_event_xxx()
*/ */
#define STACK_SKIP 3 #define STACK_SKIP 4
#endif
static void static void
stacktrace_trigger(struct event_trigger_data *data, void *rec) stacktrace_trigger(struct event_trigger_data *data, void *rec)
......
...@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
preempt_enable_notrace(); preempt_enable_notrace();
} }
#ifdef CONFIG_UNWINDER_ORC
/*
* Skip 2:
*
* function_stack_trace_call()
* ftrace_call()
*/
#define STACK_SKIP 2
#else
/*
* Skip 3:
* __trace_stack()
* function_stack_trace_call()
* ftrace_call()
*/
#define STACK_SKIP 3
#endif
static void static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip, function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs) struct ftrace_ops *op, struct pt_regs *pt_regs)
...@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (likely(disabled == 1)) { if (likely(disabled == 1)) {
pc = preempt_count(); pc = preempt_count();
trace_function(tr, ip, parent_ip, flags, pc); trace_function(tr, ip, parent_ip, flags, pc);
/* __trace_stack(tr, flags, STACK_SKIP, pc);
* skip over 5 funcs:
* __ftrace_trace_stack,
* __trace_stack,
* function_stack_trace_call
* ftrace_list_func
* ftrace_call
*/
__trace_stack(tr, flags, 5, pc);
} }
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
...@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, ...@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
tracer_tracing_off(tr); tracer_tracing_off(tr);
} }
#ifdef CONFIG_UNWINDER_ORC
/* /*
* Skip 4: * Skip 3:
*
* function_trace_probe_call()
* ftrace_ops_assist_func()
* ftrace_call()
*/
#define FTRACE_STACK_SKIP 3
#else
/*
* Skip 5:
*
* __trace_stack()
* ftrace_stacktrace() * ftrace_stacktrace()
* function_trace_probe_call() * function_trace_probe_call()
* ftrace_ops_list_func() * ftrace_ops_assist_func()
* ftrace_call() * ftrace_call()
*/ */
#define STACK_SKIP 4 #define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr) static __always_inline void trace_stack(struct trace_array *tr)
{ {
...@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr) ...@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
local_save_flags(flags); local_save_flags(flags);
pc = preempt_count(); pc = preempt_count();
__trace_stack(tr, flags, STACK_SKIP, pc); __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
} }
static void static void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment