Commit 77a7f2e9 authored by Ingo Molnar


Merge branch 'tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into tracing/core
parents ac0053fd cecbca96
@@ -784,8 +784,12 @@ and is between 256 and 4096 characters. It is defined in the file
 as early as possible in order to facilitate early
 boot debugging.
-ftrace_dump_on_oops
+ftrace_dump_on_oops[=orig_cpu]
 [FTRACE] will dump the trace buffers on oops.
+If no parameter is passed, ftrace will dump
+buffers of all CPUs, but if you pass orig_cpu, it will
+dump only the buffer of the CPU that triggered the
+oops.
 ftrace_filter=[function-list]
 [FTRACE] Limit the functions traced by the function
......
@@ -1337,12 +1337,14 @@ ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
 can either use the sysctl function or set it via the proc system
 interface.
-sysctl kernel.ftrace_dump_on_oops=1
+sysctl kernel.ftrace_dump_on_oops=n
 or
-echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
+echo n > /proc/sys/kernel/ftrace_dump_on_oops
+If n = 1, ftrace will dump buffers of all CPUs, if n = 2 ftrace will
+only dump the buffer of the CPU that triggered the oops.
 Here's an example of such a dump after a null pointer
 dereference in a kernel module:
......
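For illustration only (not part of this commit), a small user-space helper that picks the per-CPU dump mode through the proc interface documented above could look like this; the proc path comes from the documentation, everything else is hypothetical:

/* Hypothetical sketch: write n = 2 (dump only the oops'ing CPU) to the
 * documented proc file. Requires root; writing 1 would dump all CPUs. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/ftrace_dump_on_oops", "w");

        if (!f) {
                perror("ftrace_dump_on_oops");
                return EXIT_FAILURE;
        }
        fputs("2\n", f);
        fclose(f);
        return EXIT_SUCCESS;
}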
@@ -289,7 +289,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
 {
-ftrace_dump();
+ftrace_dump(DUMP_ALL);
 }
 static struct sysrq_key_op sysrq_ftrace_dump_op = {
 .handler = sysrq_ftrace_dump,
......
@@ -492,7 +492,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION .trace_recursion = 0,
......
@@ -490,6 +490,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+enum ftrace_dump_mode {
+DUMP_NONE,
+DUMP_ALL,
+DUMP_ORIG,
+};
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -571,7 +578,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +599,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 /*
......
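As a usage sketch for the new declaration above (hypothetical kernel-module code, not part of this commit), a caller can now choose which buffers are dumped:

/* Hypothetical example: calling ftrace_dump() with the new enum argument. */
#include <linux/kernel.h>

static void example_error_path(void)        /* hypothetical helper */
{
        /* Dump only the ring buffer of the CPU this code is running on. */
        ftrace_dump(DUMP_ORIG);

        /* ftrace_dump(DUMP_ALL) would dump every CPU, like the old call did. */
}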
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
-* /proc/sys/kernel/ftrace_dump_on_oops to true.
+* /proc/sys/kernel/ftrace_dump_on_oops
+* Set 1 if you want to dump buffers of all CPUs
+* Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
-int ftrace_dump_on_oops;
+enum ftrace_dump_mode ftrace_dump_on_oops;
 static int tracing_set_tracer(const char *buf);
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-ftrace_dump_on_oops = 1;
-return 1;
+if (*str++ != '=' || !*str) {
+ftrace_dump_on_oops = DUMP_ALL;
+return 1;
+}
+if (!strcmp("orig_cpu", str)) {
+ftrace_dump_on_oops = DUMP_ORIG;
+return 1;
+}
+return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
@@ -4338,7 +4350,7 @@ static int trace_panic_handler(struct notifier_block *this,
 unsigned long event, void *unused)
 {
 if (ftrace_dump_on_oops)
-ftrace_dump();
+ftrace_dump(ftrace_dump_on_oops);
 return NOTIFY_OK;
 }
@@ -4355,7 +4367,7 @@ static int trace_die_handler(struct notifier_block *self,
 switch (val) {
 case DIE_OOPS:
 if (ftrace_dump_on_oops)
-ftrace_dump();
+ftrace_dump(ftrace_dump_on_oops);
 break;
 default:
 break;
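Note that DUMP_NONE is the first enumerator and therefore evaluates to 0, which is why the unchanged "if (ftrace_dump_on_oops)" guards above still work and the variable can be passed straight to ftrace_dump(). A minimal stand-alone sketch of that property (illustration only; dump() is a hypothetical stub, not kernel code):

#include <stdio.h>

enum ftrace_dump_mode { DUMP_NONE, DUMP_ALL, DUMP_ORIG };

static void dump(enum ftrace_dump_mode mode)        /* hypothetical stub */
{
        printf("dumping in mode %d\n", mode);
}

int main(void)
{
        enum ftrace_dump_mode ftrace_dump_on_oops = DUMP_ORIG;

        if (ftrace_dump_on_oops)        /* false only for DUMP_NONE (== 0) */
                dump(ftrace_dump_on_oops);
        return 0;
}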
@@ -4396,7 +4408,8 @@ trace_printk_seq(struct trace_seq *s)
 trace_seq_init(s);
 }
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 static arch_spinlock_t ftrace_dump_lock =
 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4442,25 @@ static void __ftrace_dump(bool disable_tracing)
 /* don't look at user memory in panic mode */
 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
-printk(KERN_TRACE "Dumping ftrace buffer:\n");
 /* Simulate the iterator */
 iter.tr = &global_trace;
 iter.trace = current_trace;
-iter.cpu_file = TRACE_PIPE_ALL_CPU;
+switch (oops_dump_mode) {
+case DUMP_ALL:
+iter.cpu_file = TRACE_PIPE_ALL_CPU;
+break;
+case DUMP_ORIG:
+iter.cpu_file = raw_smp_processor_id();
+break;
+case DUMP_NONE:
+goto out_enable;
+default:
+printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+iter.cpu_file = TRACE_PIPE_ALL_CPU;
+}
+printk(KERN_TRACE "Dumping ftrace buffer:\n");
 /*
 * We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4499,7 @@ static void __ftrace_dump(bool disable_tracing)
 else
 printk(KERN_TRACE "---------------------------------\n");
+ out_enable:
 /* Re-enable tracing if requested */
 if (!disable_tracing) {
 trace_flags |= old_userobj;
@@ -4489,9 +4516,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-__ftrace_dump(true);
+__ftrace_dump(true, oops_dump_mode);
 }
 __init static int tracer_alloc_buffers(void)
......
@@ -256,7 +256,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST 100000000
-static void __ftrace_dump(bool disable_tracing);
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -267,7 +268,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 ftrace_graph_stop();
 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 if (ftrace_dump_on_oops)
-__ftrace_dump(false);
+__ftrace_dump(false, DUMP_ALL);
 return 0;
 }
......