Commit 84e53ff7 authored by Ingo Molnar

Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull tracing updates from Steven Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents ccf59d8d 7bcfaf54
@@ -2859,6 +2859,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			to facilitate early boot debugging.
 			See also Documentation/trace/events.txt
 
+	trace_options=[option-list]
+			[FTRACE] Enable or disable tracer options at boot.
+			The option-list is a comma delimited list of options
+			that can be enabled or disabled just as if you were
+			to echo the option name into
+			    /sys/kernel/debug/tracing/trace_options
+			For example, to enable stacktrace option (to dump the
+			stack trace of each event), add to the command line:
+			      trace_options=stacktrace
+			See also Documentation/trace/ftrace.txt "trace options"
+			section.
+
 	transparent_hugepage=
 			[KNL]
 			Format: [always|madvise|never]
...
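The plumbing behind this parameter is added later in this same series (the set_trace_boot_options hunk and the loop in tracer_alloc_buffers below). Condensed into one place, with every name taken from the diff itself, the pattern is roughly:

    static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
    static char *trace_boot_options __initdata;

    /* Stash the raw comma-separated list; the tracer core is not
     * up yet, so it can only be parsed later. */
    static int __init set_trace_boot_options(char *str)
    {
            strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
            trace_boot_options = trace_boot_options_buf;
            return 0;
    }
    __setup("trace_options=", set_trace_boot_options);

    /* In tracer_alloc_buffers(), each option is then applied exactly
     * as if it had been echoed into trace_options: */
    while (trace_boot_options) {
            char *option = strsep(&trace_boot_options, ",");
            trace_set_options(option);
    }

Deferring the parse is the point of the two-step design: option handlers may touch tracer state that does not exist at __setup() time.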
@@ -127,13 +127,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc,
 					struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event);
...
@@ -527,9 +527,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...);
-
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
@@ -539,8 +536,8 @@ static inline void tracing_on(void) { }
 static inline void tracing_off(void) { }
 static inline int tracing_is_on(void) { return 0; }
-static inline int
-trace_printk(const char *fmt, ...)
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
 {
 	return 0;
 }
...
@@ -159,13 +159,14 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
...
@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto)			\
 	{ assign; }							\
 									\
 	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
-		trace_nowake_buffer_unlock_commit(buffer,		\
-						  event, irq_flags, pc); \
+		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 
 /*
 * The ftrace_test_probe is compiled out, it is only here as a build time check
...
@@ -31,27 +31,4 @@ struct syscall_metadata {
 	struct ftrace_event_call *exit_event;
 };
 
-#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long arch_syscall_addr(int nr);
-extern int init_syscall_trace(struct ftrace_event_call *call);
-
-extern int reg_event_syscall_enter(struct ftrace_event_call *call);
-extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
-extern int reg_event_syscall_exit(struct ftrace_event_call *call);
-extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
-extern int
-ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
-				      struct trace_event *event);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
-				     struct trace_event *event);
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-int perf_sysenter_enable(struct ftrace_event_call *call);
-void perf_sysenter_disable(struct ftrace_event_call *call);
-int perf_sysexit_enable(struct ftrace_event_call *call);
-void perf_sysexit_disable(struct ftrace_event_call *call);
-#endif
-
 #endif /* _TRACE_SYSCALL_H */
@@ -119,6 +119,7 @@ config TRACING
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
+	select IRQ_WORK
 
 config GENERIC_TRACER
 	bool
...
@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
 {
 	return register_ftrace_command(&ftrace_mod_cmd);
 }
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
 	ftrace_enabled = 1;
 	return 0;
 }
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
@@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	if (strlen(tmp) == 0)
 		return 1;
 
-	ret = strict_strtol(tmp, 10, &val);
+	ret = kstrtol(tmp, 10, &val);
 	if (ret < 0)
 		return ret;
...
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
 	local_t				entries_bytes;
-	local_t				commit_overrun;
-	local_t				overrun;
 	local_t				entries;
+	local_t				overrun;
+	local_t				commit_overrun;
+	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
@@ -1820,7 +1821,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 }
 
 /**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
  * @event: the even to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
@@ -2155,8 +2156,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * If we are not in overwrite mode,
 		 * this is easy, just stop here.
 		 */
-		if (!(buffer->flags & RB_FL_OVERWRITE))
+		if (!(buffer->flags & RB_FL_OVERWRITE)) {
+			local_inc(&cpu_buffer->dropped_events);
 			goto out_reset;
+		}
 
 		ret = rb_handle_head_page(cpu_buffer,
 					  tail_page,
@@ -2720,8 +2723,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
 * and not the length of the event which would hold the header.
 */
 int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -2929,12 +2932,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 {
 	unsigned long flags;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long ret;
+	u64 ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
@@ -2995,7 +2998,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
@@ -3015,7 +3019,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
@@ -3035,6 +3041,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 
+/**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->dropped_events);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
+
 /**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
@@ -3864,9 +3892,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
-	local_set(&cpu_buffer->commit_overrun, 0);
 	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
+	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->dropped_events, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
...
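The new counter is strictly per-CPU. A hypothetical helper, not part of this commit, that totals it for a whole buffer using only the exported accessor might look like:

    /* Sum of events dropped because the buffer filled up in
     * non-overwrite mode. ring_buffer_dropped_events_cpu() returns 0
     * for CPUs outside the buffer's cpumask, so iterating every
     * online CPU (for_each_online_cpu, <linux/cpumask.h>) is safe. */
    static unsigned long total_dropped_events(struct ring_buffer *buffer)
    {
            unsigned long total = 0;
            int cpu;

            for_each_online_cpu(cpu)
                    total += ring_buffer_dropped_events_cpu(buffer, cpu);

            return total;
    }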
@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -77,6 +78,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 	return 0;
 }
 
+/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, then this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
 /*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
@@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+	trace_boot_options = trace_boot_options_buf;
+	return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
@@ -198,20 +226,9 @@ static struct trace_array max_tr;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag. Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
 int tracing_is_enabled(void)
 {
-	return tracer_enabled;
+	return tracing_is_on();
 }
 
 /*
@@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * trace_wait queue. These is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
+	wake_up_all(&trace_wait);
 }
-
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 /**
 * tracing_on - enable tracing buffers
@@ -393,22 +416,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
@@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &threshold);
+	ret = kstrtoul(str, 0, &threshold);
 	if (ret < 0)
 		return 0;
 	tracing_thresh = threshold * 1000;
@@ -757,6 +764,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worse
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
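Taken together with the __buffer_unlock_commit and init_irq_work hunks further down, the wakeup handshake this commit introduces is roughly the following (a condensed sketch, not a compilable unit; all names come from the diff, and init_irq_work(&trace_work_wakeup, trace_wake_up) is done once in tracer_alloc_buffers()):

    /* Reader side (default_wait_pipe above): announce interest,
     * then sleep until a writer wakes the queue. */
    trace_wakeup_needed = true;
    if (trace_empty(iter))
            schedule();

    /* Writer side (__buffer_unlock_commit below): a commit may run
     * deep inside a critical section where calling wake_up_all()
     * directly could deadlock, so it queues an irq_work instead,
     * which is safe from any context. */
    if (trace_wakeup_needed) {
            trace_wakeup_needed = false;
            irq_work_queue(&trace_work_wakeup);
    }

    /* irq_work handler, running later from a safe context: */
    static void trace_wake_up(struct irq_work *work)
    {
            wake_up_all(&trace_wait);
    }

Spurious wakeups are tolerated by design, which is why the flag needs no locking.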
 /**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
@@ -875,32 +916,6 @@ int register_tracer(struct tracer *type)
 	return ret;
 }
 
-void unregister_tracer(struct tracer *type)
-{
-	struct tracer **t;
-
-	mutex_lock(&trace_types_lock);
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		if (*t == type)
-			goto found;
-	}
-	pr_info("Tracer %s not registered\n", type->name);
-	goto out;
-
- found:
-	*t = (*t)->next;
-
-	if (type == current_trace && tracer_enabled) {
-		tracer_enabled = 0;
-		tracing_stop();
-		if (current_trace->stop)
-			current_trace->stop(&global_trace);
-		current_trace = &nop_trace;
-	}
- out:
-	mutex_unlock(&trace_types_lock);
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -1131,10 +1146,14 @@ void trace_find_cmdline(int pid, char comm[])
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
-	    !tracing_is_on())
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+		return;
+
+	if (!__this_cpu_read(trace_cmdline_save))
 		return;
 
+	__this_cpu_write(trace_cmdline_save, false);
+
 	trace_save_cmdline(tsk);
 }
@@ -1178,27 +1197,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1215,29 +1243,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
@@ -1269,7 +1289,7 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip		= parent_ip;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -1362,7 +1382,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -1458,7 +1478,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -1559,10 +1579,10 @@ static int alloc_percpu_trace_buffer(void)
 	return -ENOMEM;
 }
 
+static int buffers_allocated;
+
 void trace_printk_init_buffers(void)
 {
-	static int buffers_allocated;
-
 	if (buffers_allocated)
 		return;
 
@@ -1571,7 +1591,38 @@ void trace_printk_init_buffers(void)
 
 	pr_info("ftrace: Allocated trace_printk buffers\n");
 
+	/* Expand the buffers to set size */
+	tracing_update_buffers();
+
 	buffers_allocated = 1;
+
+	/*
+	 * trace_printk_init_buffers() can be called by modules.
+	 * If that happens, then we need to start cmdline recording
+	 * directly here. If the global_trace.buffer is already
+	 * allocated here, then this was called by module code.
+	 */
+	if (global_trace.buffer)
+		tracing_start_cmdline_record();
+}
+
+void trace_printk_start_comm(void)
+{
+	/* Start tracing comms if trace printk is set */
+	if (!buffers_allocated)
+		return;
+	tracing_start_cmdline_record();
+}
+
+static void trace_printk_start_stop_comm(int enabled)
+{
+	if (!buffers_allocated)
+		return;
+
+	if (enabled)
+		tracing_start_cmdline_record();
+	else
+		tracing_stop_cmdline_record();
 }
 
 /**
@@ -1622,7 +1673,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -1693,7 +1744,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 out:
@@ -2794,26 +2845,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_OVERWRITE)
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+	if (mask == TRACE_ITER_PRINTK)
+		trace_printk_start_stop_comm(enabled);
 }
 
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
-			size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
 {
-	char buf[64];
 	char *cmp;
 	int neg = 0;
-	int ret;
+	int ret = 0;
 	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	cmp = strstrip(buf);
+	cmp = strstrip(option);
 
 	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
@@ -2832,10 +2876,25 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		mutex_lock(&trace_types_lock);
 		ret = set_tracer_option(current_trace, cmp, neg);
 		mutex_unlock(&trace_types_lock);
+		if (ret)
+			return ret;
 	}
 
+	return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+			size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	trace_set_options(buf);
+
 	*ppos += cnt;
 
 	return cnt;
@@ -2939,56 +2998,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
 	.llseek		= generic_file_llseek,
 };
 
-static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	r = sprintf(buf, "%u\n", tracer_enabled);
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
-		   size_t cnt, loff_t *ppos)
-{
-	struct trace_array *tr = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	val = !!val;
-
-	mutex_lock(&trace_types_lock);
-	if (tracer_enabled ^ val) {
-		/* Only need to warn if this is used to change the state */
-		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
-		if (val) {
-			tracer_enabled = 1;
-			if (current_trace->start)
-				current_trace->start(tr);
-			tracing_start();
-		} else {
-			tracer_enabled = 0;
-			tracing_stop();
-			if (current_trace->stop)
-				current_trace->stop(tr);
-		}
-	}
-	mutex_unlock(&trace_types_lock);
-
-	*ppos += cnt;
-
-	return cnt;
-}
-
 static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
@@ -3030,6 +3039,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 	 */
 	ring_buffer_expanded = 1;
 
+	/* May be called before buffers are initialized */
+	if (!global_trace.buffer)
+		return 0;
+
 	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
@@ -3385,19 +3398,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
 * This is a make-shift waitqueue.
 * A tracer might use this callback on some rare cases:
@@ -3438,7 +3438,7 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EINTR;
 
 		/*
-		 * We block until we read something and tracing is disabled.
+		 * We block until we read something and tracing is enabled.
 		 * We still block if tracing is disabled, but we have never
 		 * read anything. This allows a user to cat this file, and
 		 * then enable tracing. But after we have read something,
@@ -3446,7 +3446,7 @@ static int tracing_wait_pipe(struct file *filp)
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (!tracer_enabled && iter->pos)
+		if (tracing_is_enabled() && iter->pos)
 			break;
 	}
@@ -3955,7 +3955,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		entry->buf[cnt] = '\0';
 
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	written = cnt;
@@ -4016,6 +4016,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	if (max_tr.buffer)
 		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
+	/*
+	 * New clock may not be consistent with the previous clock.
+	 * Reset the buffer so that it doesn't have incomparable timestamps.
+	 */
+	tracing_reset_online_cpus(&global_trace);
+	if (max_tr.buffer)
+		tracing_reset_online_cpus(&max_tr);
+
 	mutex_unlock(&trace_types_lock);
 
 	*fpos += cnt;
@@ -4037,13 +4045,6 @@ static const struct file_operations tracing_max_lat_fops = {
 	.llseek		= generic_file_llseek,
 };
 
-static const struct file_operations tracing_ctrl_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_ctrl_read,
-	.write		= tracing_ctrl_write,
-	.llseek		= generic_file_llseek,
-};
-
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
@@ -4385,6 +4386,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	usec_rem = do_div(t, USEC_PER_SEC);
 	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
 
+	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "dropped events: %ld\n", cnt);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -4815,9 +4819,6 @@ static __init int tracer_init_debugfs(void)
 
 	d_tracer = tracing_init_dentry();
 
-	trace_create_file("tracing_enabled", 0644, d_tracer,
-			&global_trace, &tracing_ctrl_fops);
-
 	trace_create_file("trace_options", 0644, d_tracer,
 			NULL, &tracing_iter_fops);
@@ -5089,6 +5090,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
 	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		/* Must be called before global_trace.buffer is allocated */
 		trace_printk_init_buffers();
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -5136,6 +5138,7 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
 	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
@@ -5147,6 +5150,13 @@ __init static int tracer_alloc_buffers(void)
 
 	register_die_notifier(&trace_die_notifier);
 
+	while (trace_boot_options) {
+		char *option;
+
+		option = strsep(&trace_boot_options, ",");
+		trace_set_options(option);
+	}
+
 	return 0;
 
 out_free_cpumask:
...
@@ -285,8 +285,8 @@ struct tracer {
 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
-	int			print_max;
-	int			use_max_tr;
+	bool			print_max;
+	bool			use_max_tr;
 };
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
 void tracing_reset_online_cpus(struct trace_array *tr);
 void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 				  unsigned long len,
 				  unsigned long flags,
 				  int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+			    struct ring_buffer_event *event);
+
 int trace_empty(struct trace_iterator *iter);
 
 void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
 void ftrace(struct trace_array *tr,
@@ -407,7 +405,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
 void tracing_stop_sched_switch_record(void);
 void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
-void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
 
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT	= 1,
@@ -841,6 +838,7 @@ extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
+void trace_printk_start_comm(void);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
...
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->correct = val == expect;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
 	}
 	return register_tracer(&branch_trace);
 }
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
 
 #else
 static inline
...
@@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
 	mutex_unlock(&event_mutex);
 }
 
-static int
-ftrace_event_seq_open(struct inode *inode, struct file *file)
-{
-	const struct seq_operations *seq_ops;
-
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		ftrace_clear_events();
-
-	seq_ops = inode->i_private;
-	return seq_open(file, seq_ops);
-}
-
 static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
@@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 	return r;
 }
 
+static int ftrace_event_avail_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_open(struct inode *inode, struct file *file);
+
 static const struct seq_operations show_event_seq_ops = {
 	.start = t_start,
 	.next = t_next,
@@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
 };
 
 static const struct file_operations ftrace_avail_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_avail_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release,
 };
 
 static const struct file_operations ftrace_set_event_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_set_open,
 	.read = seq_read,
 	.write = ftrace_event_write,
 	.llseek = seq_lseek,
@@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void)
 	return d_events;
 }
 
+static int
+ftrace_event_avail_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_event_seq_ops;
+
+	return seq_open(file, seq_ops);
+}
+
+static int
+ftrace_event_set_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    (file->f_flags & O_TRUNC))
+		ftrace_clear_events();
+
+	return seq_open(file, seq_ops);
+}
+
 static struct dentry *
 event_subsystem_dir(const char *name, struct dentry *d_events)
 {
@@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void)
 		if (ret)
 			pr_warn("Failed to enable trace event: %s\n", token);
 	}
+
+	trace_printk_start_comm();
+
 	return 0;
 }
@@ -1505,15 +1518,13 @@ static __init int event_trace_init(void)
 		return 0;
 
 	entry = debugfs_create_file("available_events", 0444, d_tracer,
-				    (void *)&show_event_seq_ops,
-				    &ftrace_avail_fops);
+				    NULL, &ftrace_avail_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'available_events' entry\n");
 
 	entry = debugfs_create_file("set_event", 0644, d_tracer,
-				    (void *)&show_set_event_seq_ops,
-				    &ftrace_set_event_fops);
+				    NULL, &ftrace_set_event_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'set_event' entry\n");
@@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+	trace_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
...
@@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps,
 		}
 	} else {
 		if (field->is_signed)
-			ret = strict_strtoll(pred->regex.pattern, 0, &val);
+			ret = kstrtoll(pred->regex.pattern, 0, &val);
 		else
-			ret = strict_strtoull(pred->regex.pattern, 0, &val);
+			ret = kstrtoull(pred->regex.pattern, 0, &val);
 		if (ret) {
 			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
 			return -EINVAL;
...
@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 	 * We use the callback data field (which is a pointer)
 	 * as our counter.
 	 */
-	ret = strict_strtoul(number, 0, (unsigned long *)&count);
+	ret = kstrtoul(number, 0, (unsigned long *)&count);
 	if (ret)
 		return ret;
@@ -411,5 +411,4 @@ static __init int init_function_trace(void)
 	init_func_cmd_traceon();
 	return register_tracer(&function_trace);
 }
-device_initcall(init_function_trace);
+core_initcall(init_function_trace);
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent			= *trace;
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
 	return 1;
 }
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->ret				= *trace;
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
 	return register_tracer(&graph_trace);
 }
 
-device_initcall(init_graph_trace);
+core_initcall(init_graph_trace);
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 # define register_preemptirqsoff(trace) register_tracer(&trace)
@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
 
 	return 0;
 }
-device_initcall(init_irqsoff_tracer);
+core_initcall(init_irqsoff_tracer);
@@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv)
 			return -EINVAL;
 		}
 		/* an address specified */
-		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
+		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
 		if (ret) {
 			pr_info("Failed to parse address.\n");
 			return ret;
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit_regs(buffer, event,
+		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
 
 /* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit_regs(buffer, event,
+		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
 
 /* Event entry printers */
...
@@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type)
goto fail;
type++;
- if (strict_strtoul(type, 0, &bs))
+ if (kstrtoul(type, 0, &bs))
goto fail;
switch (bs) {
@@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
tmp = strchr(symbol, '+');
if (tmp) {
- /* skip sign because strict_strtol doesn't accept '+' */
- ret = strict_strtoul(tmp + 1, 0, offset);
+ /* skip sign because kstrtoul doesn't accept '+' */
+ ret = kstrtoul(tmp + 1, 0, offset);
if (ret)
return ret;
@@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
else
ret = -EINVAL;
} else if (isdigit(arg[5])) {
- ret = strict_strtoul(arg + 5, 10, &param);
+ ret = kstrtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ret = -EINVAL;
else {
@@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
case '@': /* memory or symbol */
if (isdigit(arg[1])) {
- ret = strict_strtoul(arg + 1, 0, &param);
+ ret = kstrtoul(arg + 1, 0, &param);
if (ret)
break;
@@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
break;
case '+': /* deref memory */
- arg++; /* Skip '+', because strict_strtol() rejects it. */
+ arg++; /* Skip '+', because kstrtol() rejects it. */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
break;
*tmp = '\0';
- ret = strict_strtol(arg, 0, &offset);
+ ret = kstrtol(arg, 0, &offset);
if (ret)
break;
...
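The renamed comments above matter because, as of this series, kstrtol() and kstrtoul() return -EINVAL on a leading '+', so callers strip it by hand. Sketch (str and off are illustrative names):

    long off;

    if (*str == '+')
            str++;          /* kstrtol() rejects a leading '+' */
    if (kstrtol(str, 0, &off))
            return -EINVAL;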
@@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr->buffer, flags, 6, pc);
- ftrace_trace_userstack(tr->buffer, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
...
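The three-line deletion above is safe because trace_buffer_unlock_commit() records the kernel and user stacks itself, gated by the stacktrace/userstacktrace trace options. Approximately, and assuming the helper's behavior at this point in the series, the single call expands to:

    ring_buffer_unlock_commit(buffer, event);
    ftrace_trace_stack(buffer, flags, 6, pc);       /* if stacktrace is set */
    ftrace_trace_userstack(buffer, flags, pc);      /* if userstacktrace is set */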
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
return 0;
}
- device_initcall(init_wakeup_tracer);
+ core_initcall(init_wakeup_tracer);
@@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
int (*func)(void))
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
unsigned long count;
char *func_name;
int ret;
@@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* passed in by parameter to fool gcc from optimizing */
func();
@@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
ftrace_set_global_filter(NULL, 0, 1);
@@ -452,7 +449,6 @@ static int
trace_selftest_function_recursion(void)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@@ -465,7 +461,6 @@ trace_selftest_function_recursion(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -534,7 +529,6 @@ trace_selftest_function_recursion(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
return ret;
}
@@ -569,7 +563,6 @@ static int
trace_selftest_function_regs(void)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@@ -586,7 +579,6 @@ trace_selftest_function_regs(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -648,7 +640,6 @@ trace_selftest_function_regs(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
return ret;
}
@@ -662,7 +653,6 @@ int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
unsigned long count;
int ret;
@@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* start the tracing */
ftrace_enabled = 1;
- tracer_enabled = 1;
ret = tracer_init(trace, tr);
if (ret) {
@@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_function_regs();
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
/* kill ftrace totally if we failed */
if (ret)
@@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
+ printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);
...
@@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event,
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type, void *data);
- static int syscall_enter_define_fields(struct ftrace_event_call *call);
- static int syscall_exit_define_fields(struct ftrace_event_call *call);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
@@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
return &entry->enter_fields;
}
- struct trace_event_functions enter_syscall_print_funcs = {
- .trace = print_syscall_enter,
- };
- struct trace_event_functions exit_syscall_print_funcs = {
- .trace = print_syscall_exit,
- };
- struct ftrace_event_class event_class_syscall_enter = {
- .system = "syscalls",
- .reg = syscall_enter_register,
- .define_fields = syscall_enter_define_fields,
- .get_fields = syscall_get_enter_fields,
- .raw_init = init_syscall_trace,
- };
- struct ftrace_event_class event_class_syscall_exit = {
- .system = "syscalls",
- .reg = syscall_exit_register,
- .define_fields = syscall_exit_define_fields,
- .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
- .raw_init = init_syscall_trace,
- };
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
@@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
- int init_syscall_trace(struct ftrace_event_call *call)
+ static int init_syscall_trace(struct ftrace_event_call *call)
{
int id;
int num;
@@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call)
return id;
}
+ struct trace_event_functions enter_syscall_print_funcs = {
+ .trace = print_syscall_enter,
+ };
+ struct trace_event_functions exit_syscall_print_funcs = {
+ .trace = print_syscall_exit,
+ };
+ struct ftrace_event_class event_class_syscall_enter = {
+ .system = "syscalls",
+ .reg = syscall_enter_register,
+ .define_fields = syscall_enter_define_fields,
+ .get_fields = syscall_get_enter_fields,
+ .raw_init = init_syscall_trace,
+ };
+ struct ftrace_event_class event_class_syscall_exit = {
+ .system = "syscalls",
+ .reg = syscall_exit_register,
+ .define_fields = syscall_exit_define_fields,
+ .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
+ .raw_init = init_syscall_trace,
+ };
unsigned long __init __weak arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
@@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
- int perf_sysenter_enable(struct ftrace_event_call *call)
+ static int perf_sysenter_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
- void perf_sysenter_disable(struct ftrace_event_call *call)
+ static void perf_sysenter_disable(struct ftrace_event_call *call)
{
int num;
@@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
- int perf_sysexit_enable(struct ftrace_event_call *call)
+ static int perf_sysexit_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
- void perf_sysexit_disable(struct ftrace_event_call *call)
+ static void perf_sysexit_disable(struct ftrace_event_call *call)
{
int num;
...
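The shape of this file's change: init_syscall_trace() and the perf enable/disable helpers become static, and the event-class definitions that take their addresses move below them, letting the forward declarations at the top of the file be deleted. A minimal sketch of the ordering constraint (foo and syscall_ops are illustrative, not from the file):

    static int foo(void) { return 0; }

    struct ops { int (*init)(void); };

    /* the file-scope initializer must be able to see foo, which is why
     * the event classes moved below the now-static functions */
    static struct ops syscall_ops = { .init = foo };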
@@ -252,7 +252,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
- ret = strict_strtoul(arg, 0, &offset);
+ ret = kstrtoul(arg, 0, &offset);
if (ret)
goto fail_address_parse;
...