Commit b2a866f9 authored by Steven Rostedt, committed by Ingo Molnar

ftrace: function tracer with irqs disabled

Impact: disable interrupts during trace entry creation (as opposed to preempt)

To help with performance, I set the function tracer to disable only
preemption, not interrupts. If an interrupt occurred, it would not be
traced, because the function tracer protects itself from recursion.
This may be faster, but the trace output might miss some traces.
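For reference, the preempt-only callback (renamed to
function_trace_call_preempt_only by the patch below, which only shows its
tail as diff context) looks roughly like the following. The body is a
reconstruction for illustration, based on the tree of that time, not a
quotation from this patch:

	static void
	function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
	{
		struct trace_array *tr = &global_trace;
		struct trace_array_cpu *data;
		unsigned long flags;
		long disabled;
		int cpu, resched;
		int pc;

		if (unlikely(!ftrace_function_enabled))
			return;

		pc = preempt_count();
		/* only preemption is disabled; an interrupt can still arrive */
		resched = ftrace_preempt_disable();
		local_save_flags(flags);
		cpu = raw_smp_processor_id();
		data = tr->data[cpu];
		/* recursion guard: only the first entry on this CPU is recorded */
		disabled = atomic_inc_return(&data->disabled);

		if (likely(disabled == 1))
			trace_function(tr, data, ip, parent_ip, flags, pc);

		atomic_dec(&data->disabled);
		ftrace_preempt_enable(resched);
	}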

This patch makes the function tracer disable interrupts, but it also
adds a runtime option to disable preemption instead. It does this by
having two different tracer functions. When the function tracer is
enabled, it checks which version is requested (irqs disabled or
preemption disabled) and uses the corresponding function as the
tracer callback.

Disabling irqs is the default behaviour, but if the user wants better
performance, at the risk of missing traces, they can choose the
preempt-disabled version.
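
Concretely, the switch is the new "ftrace_preempt" entry in trace_options
added by this patch: writing it into the trace_options file of the tracing
directory (e.g. echo ftrace_preempt > trace_options, wherever debugfs is
mounted) sets TRACE_ITER_PREEMPTONLY, and the next time the function tracer
is started it installs function_trace_call_preempt_only instead of the
irq-disabling callback.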

Running hackbench three times with the irqs-disabled function tracer
and three times with the preempt-disabled one yielded:

tracing type      time (seconds)    entries recorded
------------      --------------    ----------------
irq disabled      43.393            166433066
                  43.282            166172618
                  43.298            166256704

preempt disabled  38.969            159871710
                  38.943            159972935
                  39.325            161056510

Average:

   irqs disabled:  43.324           166287462
preempt disabled:  39.079           160300385

Preempt is 10.8 percent faster than irqs disabled.
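
(Check: (43.324 - 39.079) / 39.079 ≈ 0.1086, which is where the ~10.8
percent figure comes from.)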

I wrote a patch to count function trace recursion and reran hackbench.

With irqs disabled: 1,150 times the function tracer did not trace due to
  recursion.
With preempt disabled: 5,117,718 times.

The roughly one thousand misses with irqs disabled could be due to NMIs, or
simply cases where the tracer called a function that was not protected by
notrace.

But we also see that a large amount of the trace is lost with the
preempt version.
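
The counting patch itself is not included here. Purely as an illustration,
similar numbers could be gathered with a hypothetical per-CPU counter bumped
on the path that drops recursive calls, roughly:

	/* hypothetical instrumentation, not the actual counting patch */
	static DEFINE_PER_CPU(unsigned long, ftrace_recursion_hits);

	...
		disabled = atomic_inc_return(&data->disabled);
		if (likely(disabled == 1)) {
			pc = preempt_count();
			trace_function(tr, data, ip, parent_ip, flags, pc);
		} else {
			/* a nested call hit the recursion guard and was dropped */
			__get_cpu_var(ftrace_recursion_hits)++;
		}
		atomic_dec(&data->disabled);
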
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 182e9f5f
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -244,6 +244,7 @@ static const char *trace_options[] = {
 	"stacktrace",
 	"sched-tree",
 	"ftrace_printk",
+	"ftrace_preempt",
 	NULL
 };
 
@@ -891,7 +892,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -917,6 +918,37 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	ftrace_preempt_enable(resched);
 }
 
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -925,6 +957,12 @@ static struct ftrace_ops trace_ops __read_mostly =
 void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
 	register_ftrace_function(&trace_ops);
 	if (tracer_enabled)
 		ftrace_function_enabled = 1;
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -415,6 +415,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_STACKTRACE	= 0x100,
 	TRACE_ITER_SCHED_TREE	= 0x200,
 	TRACE_ITER_PRINTK	= 0x400,
+	TRACE_ITER_PREEMPTONLY	= 0x800,
 };
 
 extern struct tracer nop_trace;