Commit b07c3f19 authored by Mathieu Desnoyers, committed by Ingo Molnar

ftrace: port to tracepoints

Port the trace_mark() call sites used by ftrace to tracepoints. (cleanup)

Changelog:
- Change error messages: marker -> tracepoint

[ mingo@elte.hu: conflict resolutions ]
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0a16b607
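
For reference, the register_trace_*() / unregister_trace_*() calls in the
diff below are generated by the tracepoint declarations the parent commit
(0a16b607) added in <trace/sched.h>. A minimal sketch of those declarations,
assuming the DEFINE_TRACE/TPPROTO/TPARGS spelling used by this era of the
tracepoint API (check the actual header for the exact text):

/* Sketch of <trace/sched.h>; see the parent commit for the real file.
 * DEFINE_TRACE() generates trace_sched_wakeup() for the call site plus
 * the typed register_/unregister_trace_sched_wakeup() pair used below.
 */
#include <linux/tracepoint.h>

DEFINE_TRACE(sched_wakeup,
        TPPROTO(struct rq *rq, struct task_struct *p),
        TPARGS(rq, p));

DEFINE_TRACE(sched_wakeup_new,
        TPPROTO(struct rq *rq, struct task_struct *p),
        TPARGS(rq, p));

DEFINE_TRACE(sched_switch,
        TPPROTO(struct rq *rq, struct task_struct *prev,
                struct task_struct *next),
        TPARGS(rq, prev, next));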
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,8 +9,8 @@
 #include <linux/debugfs.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
@@ -19,16 +19,17 @@ static int __read_mostly tracer_enabled;
 static atomic_t sched_ref;
 
 static void
-sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                        struct task_struct *next)
 {
-        struct trace_array **ptr = private;
-        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
         int cpu;
 
+        if (!atomic_read(&sched_ref))
+                return;
+
         tracing_record_cmdline(prev);
         tracing_record_cmdline(next);
@@ -37,95 +38,42 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev,
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
+        data = ctx_trace->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1))
-                tracing_sched_switch_trace(tr, data, prev, next, flags);
+                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
 
         atomic_dec(&data->disabled);
         local_irq_restore(flags);
 }
 
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-                      const char *format, va_list *args)
-{
-        struct task_struct *prev;
-        struct task_struct *next;
-        struct rq *__rq;
-
-        if (!atomic_read(&sched_ref))
-                return;
-
-        /* skip prev_pid %d next_pid %d prev_state %ld */
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, long);
-        __rq = va_arg(*args, typeof(__rq));
-        prev = va_arg(*args, typeof(prev));
-        next = va_arg(*args, typeof(next));
-
-        /*
-         * If tracer_switch_func only points to the local
-         * switch func, it still needs the ptr passed to it.
-         */
-        sched_switch_func(probe_data, __rq, prev, next);
-}
-
 static void
-wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
-                        task_struct *curr)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
-        struct trace_array **ptr = private;
-        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
         int cpu;
 
-        if (!tracer_enabled)
+        if (!likely(tracer_enabled))
                 return;
 
-        tracing_record_cmdline(curr);
+        tracing_record_cmdline(current);
 
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
+        data = ctx_trace->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1))
-                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+                        flags);
 
         atomic_dec(&data->disabled);
         local_irq_restore(flags);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-                 const char *format, va_list *args)
-{
-        struct task_struct *curr;
-        struct task_struct *task;
-        struct rq *__rq;
-
-        if (likely(!tracer_enabled))
-                return;
-
-        /* Skip pid %d state %ld */
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, long);
-        /* now get the meat: "rq %p task %p rq->curr %p" */
-        __rq = va_arg(*args, typeof(__rq));
-        task = va_arg(*args, typeof(task));
-        curr = va_arg(*args, typeof(curr));
-
-        tracing_record_cmdline(task);
-        tracing_record_cmdline(curr);
-
-        wakeup_func(probe_data, __rq, task, curr);
-}
-
 static void sched_switch_reset(struct trace_array *tr)
 {
         int cpu;
@@ -140,60 +88,40 @@ static int tracing_sched_register(void)
 {
         int ret;
 
-        ret = marker_probe_register("kernel_sched_wakeup",
-                        "pid %d state %ld ## rq %p task %p rq->curr %p",
-                        wake_up_callback,
-                        &ctx_trace);
+        ret = register_trace_sched_wakeup(probe_sched_wakeup);
         if (ret) {
-                pr_info("wakeup trace: Couldn't add marker"
+                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                 return ret;
         }
 
-        ret = marker_probe_register("kernel_sched_wakeup_new",
-                        "pid %d state %ld ## rq %p task %p rq->curr %p",
-                        wake_up_callback,
-                        &ctx_trace);
+        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
         if (ret) {
-                pr_info("wakeup trace: Couldn't add marker"
+                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                 goto fail_deprobe;
         }
 
-        ret = marker_probe_register("kernel_sched_schedule",
-                        "prev_pid %d next_pid %d prev_state %ld "
-                        "## rq %p prev %p next %p",
-                        sched_switch_callback,
-                        &ctx_trace);
+        ret = register_trace_sched_switch(probe_sched_switch);
         if (ret) {
-                pr_info("sched trace: Couldn't add marker"
+                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                 goto fail_deprobe_wake_new;
         }
 
         return ret;
 fail_deprobe_wake_new:
-        marker_probe_unregister("kernel_sched_wakeup_new",
-                                wake_up_callback,
-                                &ctx_trace);
+        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
 fail_deprobe:
-        marker_probe_unregister("kernel_sched_wakeup",
-                                wake_up_callback,
-                                &ctx_trace);
+        unregister_trace_sched_wakeup(probe_sched_wakeup);
         return ret;
 }
 
 static void tracing_sched_unregister(void)
 {
-        marker_probe_unregister("kernel_sched_schedule",
-                                sched_switch_callback,
-                                &ctx_trace);
-        marker_probe_unregister("kernel_sched_wakeup_new",
-                                wake_up_callback,
-                                &ctx_trace);
-        marker_probe_unregister("kernel_sched_wakeup",
-                                wake_up_callback,
-                                &ctx_trace);
+        unregister_trace_sched_switch(probe_sched_switch);
+        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
+        unregister_trace_sched_wakeup(probe_sched_wakeup);
 }
 
 static void tracing_start_sched_switch(void)
...
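
Unlike marker_probe_register(), which took a format string and a generic
callback that had to re-parse its arguments from a va_list, each
register_trace_*() call above is type-checked against the tracepoint's
prototype. Conceptually (an assumption about what the macro provides, not
its literal expansion), each DEFINE_TRACE() yields a pair like:

extern int register_trace_sched_wakeup(
                void (*probe)(struct rq *rq, struct task_struct *p));
extern int unregister_trace_sched_wakeup(
                void (*probe)(struct rq *rq, struct task_struct *p));

which is why a probe with the wrong signature now fails at compile time
instead of silently corrupting a va_list at run time.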
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,7 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/marker.h>
+#include <trace/sched.h>
 
 #include "trace.h"
@@ -112,18 +112,18 @@ static int report_latency(cycle_t delta)
 }
 
 static void notrace
-wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
+probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
        struct task_struct *next)
 {
         unsigned long latency = 0, t0 = 0, t1 = 0;
-        struct trace_array **ptr = private;
-        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         cycle_t T0, T1, delta;
         unsigned long flags;
         long disabled;
         int cpu;
 
+        tracing_record_cmdline(prev);
+
         if (unlikely(!tracer_enabled))
                 return;
@@ -140,11 +140,11 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
                 return;
 
         /* The task we are waiting for is waking up */
-        data = tr->data[wakeup_cpu];
+        data = wakeup_trace->data[wakeup_cpu];
 
         /* disable local data, not wakeup_cpu data */
         cpu = raw_smp_processor_id();
-        disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
         if (likely(disabled != 1))
                 goto out;
@@ -155,7 +155,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
         if (unlikely(!tracer_enabled || next != wakeup_task))
                 goto out_unlock;
 
-        trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+        trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
         /*
          * usecs conversion is slow so we try to delay the conversion
@@ -174,39 +174,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
         t0 = nsecs_to_usecs(T0);
         t1 = nsecs_to_usecs(T1);
 
-        update_max_tr(tr, wakeup_task, wakeup_cpu);
+        update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 
 out_unlock:
-        __wakeup_reset(tr);
+        __wakeup_reset(wakeup_trace);
         __raw_spin_unlock(&wakeup_lock);
         local_irq_restore(flags);
 out:
-        atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-                      const char *format, va_list *args)
-{
-        struct task_struct *prev;
-        struct task_struct *next;
-        struct rq *__rq;
-
-        /* skip prev_pid %d next_pid %d prev_state %ld */
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, long);
-        __rq = va_arg(*args, typeof(__rq));
-        prev = va_arg(*args, typeof(prev));
-        next = va_arg(*args, typeof(next));
-
-        tracing_record_cmdline(prev);
-
-        /*
-         * If tracer_switch_func only points to the local
-         * switch func, it still needs the ptr passed to it.
-         */
-        wakeup_sched_switch(probe_data, __rq, prev, next);
+        atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -240,19 +215,24 @@ static void wakeup_reset(struct trace_array *tr)
 }
 
 static void
-wakeup_check_start(struct trace_array *tr, struct task_struct *p,
-                   struct task_struct *curr)
+probe_wakeup(struct rq *rq, struct task_struct *p)
 {
         int cpu = smp_processor_id();
         unsigned long flags;
         long disabled;
 
+        if (likely(!tracer_enabled))
+                return;
+
+        tracing_record_cmdline(p);
+        tracing_record_cmdline(current);
+
         if (likely(!rt_task(p)) ||
                         p->prio >= wakeup_prio ||
-                        p->prio >= curr->prio)
+                        p->prio >= current->prio)
                 return;
 
-        disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
         if (unlikely(disabled != 1))
                 goto out;
@@ -264,7 +244,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
                 goto out_locked;
 
         /* reset the trace */
-        __wakeup_reset(tr);
+        __wakeup_reset(wakeup_trace);
 
         wakeup_cpu = task_cpu(p);
         wakeup_prio = p->prio;
@@ -274,74 +254,37 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
         local_save_flags(flags);
 
-        tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-        trace_function(tr, tr->data[wakeup_cpu],
+        wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
+        trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
                        CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
         __raw_spin_unlock(&wakeup_lock);
 out:
-        atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-                 const char *format, va_list *args)
-{
-        struct trace_array **ptr = probe_data;
-        struct trace_array *tr = *ptr;
-        struct task_struct *curr;
-        struct task_struct *task;
-        struct rq *__rq;
-
-        if (likely(!tracer_enabled))
-                return;
-
-        /* Skip pid %d state %ld */
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, long);
-        /* now get the meat: "rq %p task %p rq->curr %p" */
-        __rq = va_arg(*args, typeof(__rq));
-        task = va_arg(*args, typeof(task));
-        curr = va_arg(*args, typeof(curr));
-
-        tracing_record_cmdline(task);
-        tracing_record_cmdline(curr);
-
-        wakeup_check_start(tr, task, curr);
+        atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
 {
         int ret;
 
-        ret = marker_probe_register("kernel_sched_wakeup",
-                        "pid %d state %ld ## rq %p task %p rq->curr %p",
-                        wake_up_callback,
-                        &wakeup_trace);
+        ret = register_trace_sched_wakeup(probe_wakeup);
         if (ret) {
-                pr_info("wakeup trace: Couldn't add marker"
+                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                 return;
         }
 
-        ret = marker_probe_register("kernel_sched_wakeup_new",
-                        "pid %d state %ld ## rq %p task %p rq->curr %p",
-                        wake_up_callback,
-                        &wakeup_trace);
+        ret = register_trace_sched_wakeup_new(probe_wakeup);
         if (ret) {
-                pr_info("wakeup trace: Couldn't add marker"
+                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                 goto fail_deprobe;
         }
 
-        ret = marker_probe_register("kernel_sched_schedule",
-                        "prev_pid %d next_pid %d prev_state %ld "
-                        "## rq %p prev %p next %p",
-                        sched_switch_callback,
-                        &wakeup_trace);
+        ret = register_trace_sched_switch(probe_wakeup_sched_switch);
         if (ret) {
-                pr_info("sched trace: Couldn't add marker"
+                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                 goto fail_deprobe_wake_new;
         }
@@ -363,28 +306,18 @@ static void start_wakeup_tracer(struct trace_array *tr)
         return;
 fail_deprobe_wake_new:
-        marker_probe_unregister("kernel_sched_wakeup_new",
-                                wake_up_callback,
-                                &wakeup_trace);
+        unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
-        marker_probe_unregister("kernel_sched_wakeup",
-                                wake_up_callback,
-                                &wakeup_trace);
+        unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
         tracer_enabled = 0;
         unregister_ftrace_function(&trace_ops);
-        marker_probe_unregister("kernel_sched_schedule",
-                                sched_switch_callback,
-                                &wakeup_trace);
-        marker_probe_unregister("kernel_sched_wakeup_new",
-                                wake_up_callback,
-                                &wakeup_trace);
-        marker_probe_unregister("kernel_sched_wakeup",
-                                wake_up_callback,
-                                &wakeup_trace);
+        unregister_trace_sched_switch(probe_wakeup_sched_switch);
+        unregister_trace_sched_wakeup_new(probe_wakeup);
+        unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void wakeup_tracer_init(struct trace_array *tr)
...
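
The same typed API is open to any other probe. A minimal module-style
sketch (hypothetical demo code, not part of this patch) following the
register/unregister pattern adopted above, assuming <trace/sched.h> from
this series:

/* Hypothetical demo: count context switches via the sched_switch
 * tracepoint. The probe name and counter are illustrative only.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/sched.h>

static atomic_t switch_count = ATOMIC_INIT(0);

/* Probe bodies run in scheduler context with the runqueue locked,
 * so keep them minimal and non-blocking. */
static void probe_count_switch(struct rq *rq, struct task_struct *prev,
                               struct task_struct *next)
{
        atomic_inc(&switch_count);
}

static int __init count_switch_init(void)
{
        /* Fails with the tracepoint's error code if registration is
         * rejected, mirroring the error handling in this patch. */
        return register_trace_sched_switch(probe_count_switch);
}

static void __exit count_switch_exit(void)
{
        unregister_trace_sched_switch(probe_count_switch);
        printk(KERN_INFO "saw %d context switches\n",
               atomic_read(&switch_count));
}

module_init(count_switch_init);
module_exit(count_switch_exit);
MODULE_LICENSE("GPL");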