Commit 03fae44b authored by Linus Torvalds

Merge tag 'trace-v4.15-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "With the new ORC unwinder, ftrace stack tracing became disfunctional.

  One was that ORC didn't know how to handle the ftrace callbacks in
  general (which Josh fixed).

  The other was that ORC would just bail if it hit a dynamically
  allocated trampoline, which meant that all ftrace stack tracing done
  from the function tracer produced no results (it also broke the max
  stack size tracer). I added a check to the ORC unwinder to see if
  the trampoline belonged to ftrace, and if it did, to use the orc
  entry of the static trampoline that was used to create the dynamic
  one (the two are identical).

  Finally, I noticed that the skip values of the stack tracing were out
  of whack. I went through and fixed them up"
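
The trampoline fallback described above is what the orc_ftrace_find() addition in the ORC unwinder hunks below implements. Condensed into a rough sketch (names and control flow are taken from the diff itself; the recursion guard and the original comments are trimmed), the lookup is:

    static struct orc_entry *orc_ftrace_find(unsigned long ip)
    {
            struct ftrace_ops *ops = ftrace_ops_trampoline(ip);
            unsigned long caller;

            if (!ops)
                    return NULL;    /* ip is not in an ftrace trampoline */

            /* A dynamic trampoline is a copy of one of these two stubs. */
            if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                    caller = (unsigned long)ftrace_regs_call;
            else
                    caller = (unsigned long)ftrace_call;

            /* Same stack layout, so its ORC entry applies here as well. */
            return orc_find(caller);
    }

orc_find() only falls back to this lookup after the vmlinux and module ORC tables come up empty, so a NULL return leaves the unwinder behaving exactly as it did before the change.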

* tag 'trace-v4.15-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Update stack trace skipping for ORC unwinder
  ftrace, orc, x86: Handle ftrace dynamically allocated trampolines
  x86/ftrace: Fix ORC unwinding from ftrace handlers
parents ce30f264 2ee5b92a
@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o := n
 KASAN_SANITIZE_paravirt.o := n
 
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_test_nx.o := y
 OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
 
+ifdef CONFIG_FRAME_POINTER
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
+endif
+
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
 # boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
@@ -8,6 +8,7 @@
 #include <asm/ftrace.h>
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/unwind_hints.h>
 
         .code64
         .section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
 EXPORT_SYMBOL(mcount)
 #endif
 
-/* All cases save the original rbp (8 bytes) */
 #ifdef CONFIG_FRAME_POINTER
 # ifdef CC_USING_FENTRY
 /* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
 # endif
 #else
 /* No need to save a stack frame */
-# define MCOUNT_FRAME_SIZE 8
+# define MCOUNT_FRAME_SIZE 0
 #endif /* CONFIG_FRAME_POINTER */
 
 /* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
  */
 .macro save_mcount_regs added=0
 
-        /* Always save the original rbp */
+#ifdef CONFIG_FRAME_POINTER
+        /* Save the original rbp */
         pushq %rbp
 
-#ifdef CONFIG_FRAME_POINTER
         /*
          * Stack traces will stop at the ftrace trampoline if the frame pointer
          * is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
          * Save the original RBP. Even though the mcount ABI does not
          * require this, it helps out callers.
          */
+#ifdef CONFIG_FRAME_POINTER
         movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+#else
+        movq %rbp, %rdx
+#endif
         movq %rdx, RBP(%rsp)
 
         /* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
 
 ENTRY(function_hook)
         retq
-END(function_hook)
+ENDPROC(function_hook)
 
 ENTRY(ftrace_caller)
         /* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
 /* This is weak to keep gas from relaxing the jumps */
 WEAK(ftrace_stub)
         retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
         /* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
 
         jmp ftrace_epilogue
 
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
         restore_mcount_regs
 
         retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
-GLOBAL(return_to_handler)
+ENTRY(return_to_handler)
+        UNWIND_HINT_EMPTY
         subq $24, %rsp
 
         /* Save the return values */
@@ -330,4 +335,5 @@ GLOBAL(return_to_handler)
         movq (%rsp), %rax
         addq $24, %rsp
         JMP_NOSPEC %rdi
+END(return_to_handler)
 #endif
@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
 }
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+static struct orc_entry *orc_find(unsigned long ip);
+
+/*
+ * Ftrace dynamic trampolines do not have orc entries of their own.
+ * But they are copies of the ftrace entries that are static and
+ * defined in ftrace_*.S, which do have orc entries.
+ *
+ * If the undwinder comes across a ftrace trampoline, then find the
+ * ftrace function that was used to create it, and use that ftrace
+ * function's orc entrie, as the placement of the return code in
+ * the stack will be identical.
+ */
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+        struct ftrace_ops *ops;
+        unsigned long caller;
+
+        ops = ftrace_ops_trampoline(ip);
+        if (!ops)
+                return NULL;
+
+        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+                caller = (unsigned long)ftrace_regs_call;
+        else
+                caller = (unsigned long)ftrace_call;
+
+        /* Prevent unlikely recursion */
+        if (ip == caller)
+                return NULL;
+
+        return orc_find(caller);
+}
+#else
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+        return NULL;
+}
+#endif
+
 static struct orc_entry *orc_find(unsigned long ip)
 {
+        static struct orc_entry *orc;
+
         if (!orc_init)
                 return NULL;
 
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
                                   __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
 
         /* Module lookup: */
-        return orc_module_find(ip);
+        orc = orc_module_find(ip);
+        if (orc)
+                return orc;
+
+        return orc_ftrace_find(ip);
 }
 
 static void orc_sort_swap(void *_a, void *_b, int size)
@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
 
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
+
 bool is_ftrace_trampoline(unsigned long addr);
 
 /*
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
 };
 
 /*
- * This is used by __kernel_text_address() to return true if the
- * address is on a dynamically allocated trampoline that would
- * not return true for either core_kernel_text() or
- * is_module_text_address().
+ * Used by the stack undwinder to know about dynamic ftrace trampolines.
  */
-bool is_ftrace_trampoline(unsigned long addr)
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
-        struct ftrace_ops *op;
-        bool ret = false;
+        struct ftrace_ops *op = NULL;
 
         /*
          * Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
                 if (op->trampoline && op->trampoline_size)
                         if (addr >= op->trampoline &&
                             addr < op->trampoline + op->trampoline_size) {
-                                ret = true;
-                                goto out;
+                                preempt_enable_notrace();
+                                return op;
                         }
         } while_for_each_ftrace_op(op);
-
-out:
         preempt_enable_notrace();
 
-        return ret;
+        return NULL;
+}
+
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+        return ftrace_ops_trampoline(addr) != NULL;
 }
 
 struct ftrace_page {
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
+/*
+ * Skip 3:
+ *
+ *   trace_buffer_unlock_commit_regs()
+ *   trace_event_buffer_commit()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 3
+
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
         __buffer_unlock_commit(buffer, event);
 
         /*
-         * If regs is not set, then skip the following callers:
-         *   trace_buffer_unlock_commit_regs
-         *   event_trigger_unlock_commit
-         *   trace_event_buffer_commit
-         *   trace_event_raw_event_sched_switch
+         * If regs is not set, then skip the necessary functions.
          * Note, we can still get here via blktrace, wakeup tracer
         * and mmiotrace, but that's ok if they lose a function or
-         * two. They are that meaningful.
+         * two. They are not that meaningful.
          */
-        ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
+        ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
         ftrace_trace_userstack(buffer, flags, pc);
 }
 
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         trace.skip = skip;
 
         /*
-         * Add two, for this function and the call to save_stack_trace()
+         * Add one, for this function and the call to save_stack_trace()
          * If regs is set, then these functions will not be in the way.
          */
+#ifndef CONFIG_UNWINDER_ORC
         if (!regs)
-                trace.skip += 2;
+                trace.skip++;
+#endif
 
         /*
          * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
         local_save_flags(flags);
 
-        /*
-         * Skip 3 more, seems to get us at the caller of
-         * this function.
-         */
-        skip += 3;
+#ifndef CONFIG_UNWINDER_ORC
+        /* Skip 1 to skip this function. */
+        skip++;
+#endif
         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
                              flags, skip, preempt_count(), NULL);
 }
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 #ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_UNWINDER_ORC
+/* Skip 2:
+ *   event_triggers_post_call()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 2
+#else
 /*
- * Skip 3:
+ * Skip 4:
  *   stacktrace_trigger()
  *   event_triggers_post_call()
+ *   trace_event_buffer_commit()
  *   trace_event_raw_event_xxx()
  */
-#define STACK_SKIP 3
+#define STACK_SKIP 4
+#endif
 
 static void
 stacktrace_trigger(struct event_trigger_data *data, void *rec)
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
         preempt_enable_notrace();
 }
 
+#ifdef CONFIG_UNWINDER_ORC
+/*
+ * Skip 2:
+ *
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 2
+#else
+/*
+ * Skip 3:
+ *   __trace_stack()
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 3
+#endif
+
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
         if (likely(disabled == 1)) {
                 pc = preempt_count();
                 trace_function(tr, ip, parent_ip, flags, pc);
-                /*
-                 * skip over 5 funcs:
-                 *    __ftrace_trace_stack,
-                 *    __trace_stack,
-                 *    function_stack_trace_call
-                 *    ftrace_list_func
-                 *    ftrace_call
-                 */
-                __trace_stack(tr, flags, 5, pc);
+                __trace_stack(tr, flags, STACK_SKIP, pc);
         }
 
         atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
         tracer_tracing_off(tr);
 }
 
+#ifdef CONFIG_UNWINDER_ORC
 /*
- * Skip 4:
+ * Skip 3:
+ *
+ *   function_trace_probe_call()
+ *   ftrace_ops_assist_func()
+ *   ftrace_call()
+ */
+#define FTRACE_STACK_SKIP 3
+#else
+/*
+ * Skip 5:
+ *
+ *   __trace_stack()
  *   ftrace_stacktrace()
  *   function_trace_probe_call()
- *   ftrace_ops_list_func()
+ *   ftrace_ops_assist_func()
  *   ftrace_call()
  */
-#define STACK_SKIP 4
+#define FTRACE_STACK_SKIP 5
+#endif
 
 static __always_inline void trace_stack(struct trace_array *tr)
 {
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
         local_save_flags(flags);
         pc = preempt_count();
 
-        __trace_stack(tr, flags, STACK_SKIP, pc);
+        __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
 }
 
 static void