Commit 22402cd0 authored by Linus Torvalds

Merge tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Most of the changes are clean ups and small fixes.  Some of them have
  stable tags to them.  I searched through my INBOX just as the merge
  window opened and found lots of patches to pull.  I ran them through
  all my tests and they were in linux-next for a few days.

  Features added this release:
  ----------------------------

   - Module globbing.  You can now filter function tracing to several
     modules.  # echo '*:mod:*snd*' > set_ftrace_filter (Dmitry Safonov)

   - Tracer specific options are now visible even when the tracer is not
     active.  It was rather annoying that you could only see and modify
     tracer options after enabling the tracer.  Now they appear in the
     options/ directory even when the tracer is not active, although they
     are still only shown in the trace_options file while the tracer is
     active.

   - Trace options are now per instance (although some of the tracer
     specific options are global)

   - New tracefs file: set_event_pid.  If any PID is added to this file,
     then all events in the instance will filter out events that do not
     belong to one of those PIDs.  The sched_switch and sched_wakeup
     events also handle the next and wakee PIDs"

* tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (68 commits)
  tracefs: Fix refcount imbalance in start_creating()
  tracing: Put back comma for empty fields in boot string parsing
  tracing: Apply tracer specific options from kernel command line.
  tracing: Add some documentation about set_event_pid
  ring_buffer: Remove unneeded smp_wmb() before wakeup of reader benchmark
  tracing: Allow dumping traces without tracking trace started cpus
  ring_buffer: Fix more races when terminating the producer in the benchmark
  ring_buffer: Do no not complete benchmark reader too early
  tracing: Remove redundant TP_ARGS redefining
  tracing: Rename max_stack_lock to stack_trace_max_lock
  tracing: Allow arch-specific stack tracer
  recordmcount: arm64: Replace the ignored mcount call into nop
  recordmcount: Fix endianness handling bug for nop_mcount
  tracepoints: Fix documentation of RCU lockdep checks
  tracing: ftrace_event_is_function() can return boolean
  tracing: is_legal_op() can return boolean
  ring-buffer: rb_event_is_commit() can return boolean
  ring-buffer: rb_per_cpu_empty() can return boolean
  ring_buffer: ring_buffer_empty{cpu}() can return boolean
  ring-buffer: rb_is_reader_page() can return boolean
  ...
parents 7c623cac d227c3ae
@@ -288,6 +288,24 @@ prev_pid == 0
# cat sched_wakeup/filter
common_pid == 0
+5.4 PID filtering
+-----------------
+The set_event_pid file in the same directory as the top events directory
+will filter all events so that only tasks whose PID is listed in the
+file are traced.
+# cd /sys/kernel/debug/tracing
+# echo $$ > set_event_pid
+# echo 1 > events/enable
+Will only trace events for the current task.
+To add more PIDs without losing the PIDs already included, use '>>'.
+# echo 123 244 1 >> set_event_pid
6. Event triggers
=================
......
@@ -204,6 +204,12 @@ of ftrace. Here is a list of some of the key files:
	Have the function tracer only trace a single thread.
+  set_event_pid:
+	Have the events only trace a task with a PID listed in this file.
+	Note, the sched_switch and sched_wakeup events will also trace
+	tasks listed in this file.
  set_graph_function:
	Set a "trigger" function where tracing should start
@@ -2437,6 +2443,23 @@ The following commands are supported:
	 echo '!writeback*:mod:ext3' >> set_ftrace_filter
+	The mod command supports module globbing. Disable tracing for all
+	functions except a specific module:
+	 echo '!*:mod:!ext3' >> set_ftrace_filter
+	Disable tracing for all modules, but still trace the kernel:
+	 echo '!*:mod:*' >> set_ftrace_filter
+	Enable the filter only for the kernel:
+	 echo '*write*:mod:!*' >> set_ftrace_filter
+	Enable the filter with module globbing:
+	 echo '*write*:mod:*snd*' >> set_ftrace_filter
 - traceon/traceoff
	These commands turn tracing on and off when the specified
	functions are hit. The parameter determines how many times the
......
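As an aside on the semantics: the mod command composes two globs, one over
function names and one over module names. The sketch below is illustrative
only -- mod_glob_match() is invented for this example, and the real ftrace
matcher works on dyn_ftrace records rather than strings -- but it expresses
the matching rules using the kernel's glob_match() helper from lib/glob.c.
A '!' in front of the module glob inverts the module match, as in
':mod:!ext3'; the '!' in front of the whole expression is consumed by the
filter parser and turns the command into a removal instead of an addition.

	#include <linux/glob.h>

	/* Does function 'func' in module 'mod' match the expression? */
	static bool mod_glob_match(const char *func_glob, const char *mod_glob,
				   const char *func, const char *mod)
	{
		bool invert = false;

		if (*mod_glob == '!') {		/* ':mod:!ext3' style */
			invert = true;
			mod_glob++;
		}
		if (!glob_match(func_glob, func))
			return false;
		return glob_match(mod_glob, mod) ^ invert;
	}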
@@ -556,6 +556,7 @@ void ftrace_replace_code(int enable)
	run_sync();
	report = "updating code";
+	count = 0;
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
@@ -563,11 +564,13 @@ void ftrace_replace_code(int enable)
		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
+		count++;
	}
	run_sync();
	report = "removing breakpoints";
+	count = 0;
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
@@ -575,6 +578,7 @@ void ftrace_replace_code(int enable)
		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
+		count++;
	}
	run_sync();
......
@@ -340,8 +340,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
	}
-	if (IS_ERR(dentry))
+	if (IS_ERR(dentry)) {
		mutex_unlock(&parent->d_inode->i_mutex);
+		simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+	}
	return dentry;
}
......
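The fix balances a filesystem reference: start_creating() pins the tracefs
mount with simple_pin_fs() before doing the lookup, so any failure exit must
drop that pin again. A condensed sketch of the resulting pairing -- close to,
but not verbatim, the fixed function -- shows why the release belongs on the
error path:

	static struct dentry *start_creating(const char *name, struct dentry *parent)
	{
		struct dentry *dentry;
		int error;

		error = simple_pin_fs(&trace_fs_type, &tracefs_mount,
				      &tracefs_mount_count);	/* take a mount ref */
		if (error)
			return ERR_PTR(error);

		if (!parent)
			parent = tracefs_mount->mnt_root;

		mutex_lock(&parent->d_inode->i_mutex);
		dentry = lookup_one_len(name, parent, strlen(name));
		if (!IS_ERR(dentry) && dentry->d_inode) {
			dput(dentry);
			dentry = ERR_PTR(-EEXIST);
		}
		if (IS_ERR(dentry)) {
			mutex_unlock(&parent->d_inode->i_mutex);
			/* the fix: drop the pin taken above on failure */
			simple_release_fs(&tracefs_mount, &tracefs_mount_count);
		}
		return dentry;
	}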
@@ -263,7 +263,18 @@ static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
+#define STACK_TRACE_ENTRIES 500
+struct stack_trace;
+extern unsigned stack_trace_index[];
+extern struct stack_trace stack_trace_max;
+extern unsigned long stack_trace_max_size;
+extern arch_spinlock_t stack_trace_max_lock;
extern int stack_tracer_enabled;
+void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
......
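These declarations were previously private to kernel/trace/trace_stack.c;
exporting them is what allows an architecture to provide its own stack
tracer. A hedged sketch of the update pattern such arch code would follow
(arch_record_new_max() and its trigger are hypothetical; the variables and
stack_trace_print() come from the declarations above, and updates must be
made under stack_trace_max_lock with interrupts disabled):

	#include <linux/ftrace.h>
	#include <linux/stacktrace.h>

	static void arch_record_new_max(unsigned long this_size)
	{
		unsigned long flags;

		local_irq_save(flags);
		arch_spin_lock(&stack_trace_max_lock);

		if (this_size > stack_trace_max_size) {
			stack_trace_max_size = this_size;	/* new deepest stack seen */
			save_stack_trace(&stack_trace_max);	/* capture the backtrace */
			stack_trace_print();			/* report it */
		}

		arch_spin_unlock(&stack_trace_max_lock);
		local_irq_restore(flags);
	}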
@@ -154,8 +154,8 @@ ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
}
#endif
-int ring_buffer_empty(struct ring_buffer *buffer);
-int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+bool ring_buffer_empty(struct ring_buffer *buffer);
+bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
......
@@ -168,13 +168,12 @@ struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
-					struct ring_buffer_event *event,
-					unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
@@ -329,6 +328,7 @@ enum {
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
+	EVENT_FILE_FL_PID_FILTER_BIT,
};
/*
@@ -342,6 +342,7 @@ enum {
 *  tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers has an associated filter
+ *  PID_FILTER    - When set, the event is filtered based on pid
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -352,6 +353,7 @@ enum {
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
};
struct trace_event_file {
@@ -430,6 +432,8 @@ extern enum event_trigger_type event_triggers_call(struct trace_event_file *file
extern void event_triggers_post_call(struct trace_event_file *file,
				     enum event_trigger_type tt);
+bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
@@ -449,6 +453,8 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
			event_triggers_call(file, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
+		if (eflags & EVENT_FILE_FL_PID_FILTER)
+			return trace_event_ignore_this_pid(file);
	}
	return false;
}
@@ -508,7 +514,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
	enum event_trigger_type tt = ETT_NONE;
	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
	if (tt)
		event_triggers_post_call(file, tt);
@@ -540,7 +546,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
	enum event_trigger_type tt = ETT_NONE;
	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit_regs(buffer, event,
+		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);
	if (tt)
......
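For reference, the check behind EVENT_FILE_FL_PID_FILTER amounts to "is
current's PID absent from a sorted pid list?". The real
trace_event_ignore_this_pid() lives in kernel/trace/trace_events.c and
differs in detail; this is a minimal sketch (function names invented) built
on the trace_pid_list added to trace.h later in this merge, assuming the
pids array is kept sorted so a binary search suffices:

	#include <linux/bsearch.h>
	#include <linux/sched.h>

	static int cmp_pid(const void *key, const void *elt)
	{
		return *(const pid_t *)key - *(const pid_t *)elt;
	}

	/* Ignore the event when a filter is set and current is not in it. */
	static bool sketch_ignore_this_pid(struct trace_pid_list *filtered_pids)
	{
		if (!filtered_pids)
			return false;
		return !bsearch(&current->pid, filtered_pids->pids,
				filtered_pids->nr_pids, sizeof(pid_t), cmp_pid);
	}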
@@ -26,6 +26,7 @@ struct notifier_block;
struct tracepoint_func {
	void *func;
	void *data;
+	int prio;
};
struct tracepoint {
@@ -42,9 +43,14 @@ struct trace_enum_map {
	unsigned long		enum_value;
};
+#define TRACEPOINT_DEFAULT_PRIO	10
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
+tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
+			       int prio);
+extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
@@ -111,7 +117,18 @@ extern void syscall_unregfunc(void);
#define TP_ARGS(args...)	args
#define TP_CONDITION(args...)	args
-#ifdef CONFIG_TRACEPOINTS
+/*
+ * Individual subsystems may have a separate configuration to
+ * enable their tracepoints. By default, this file will create
+ * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
+ * wants to be able to disable its tracepoints from being created
+ * it can define NOTRACE before including the tracepoint headers.
+ */
+#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE)
+#define TRACEPOINTS_ENABLED
+#endif
+#ifdef TRACEPOINTS_ENABLED
/*
 * it_func[0] is never NULL because there is at least one element in the array
@@ -167,10 +184,11 @@ extern void syscall_unregfunc(void);
 * structure. Force alignment to the same alignment as the section start.
 *
 * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on or we match the
- * condition. This lets us find RCU issues triggered with tracepoints even
- * when this tracepoint is off. This code has no purpose other than poking
- * RCU a bit.
+ * the tracepoint code, regardless of whether tracing is on. However,
+ * don't check if the condition is false, due to interaction with idle
+ * instrumentation. This lets us find RCU issues triggered with tracepoints
+ * even when this tracepoint is off. This code has no purpose other than
+ * poking RCU a bit.
 */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
	extern struct tracepoint __tracepoint_##name;		\
@@ -196,6 +214,13 @@ extern void syscall_unregfunc(void);
				(void *)probe, data);		\
	}							\
	static inline int					\
+	register_trace_prio_##name(void (*probe)(data_proto), void *data,\
+				   int prio)			\
+	{							\
+		return tracepoint_probe_register_prio(&__tracepoint_##name, \
+					      (void *)probe, data, prio); \
+	}							\
+	static inline int					\
	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
	{							\
		return tracepoint_probe_unregister(&__tracepoint_##name,\
@@ -234,7 +259,7 @@ extern void syscall_unregfunc(void);
#define EXPORT_TRACEPOINT_SYMBOL(name)					\
	EXPORT_SYMBOL(__tracepoint_##name)
-#else /* !CONFIG_TRACEPOINTS */
+#else /* !TRACEPOINTS_ENABLED */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
	static inline void trace_##name(proto)				\
	{ }								\
@@ -266,7 +291,7 @@ extern void syscall_unregfunc(void);
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
-#endif /* CONFIG_TRACEPOINTS */
+#endif /* TRACEPOINTS_ENABLED */
#ifdef CONFIG_TRACING
/**
......
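Each DECLARE_TRACE()/TRACE_EVENT() now also generates a
register_trace_prio_<name>() variant alongside register_trace_<name>(). A
hedged usage sketch for a hypothetical tracepoint 'foo' declared elsewhere
with a single int argument (the probe's first parameter is always the
registration data pointer):

	/* 'foo' and its probe are hypothetical; the API is from this diff. */
	static void foo_probe(void *data, int arg)	/* must match foo's TP_PROTO */
	{
		/* probe body */
	}

	static int __init foo_probe_init(void)
	{
		/*
		 * Like register_trace_foo(), but with an explicit priority
		 * instead of TRACEPOINT_DEFAULT_PRIO (10); prio orders the
		 * probes attached to the same tracepoint.
		 */
		return register_trace_prio_foo(foo_probe, NULL, 100);
	}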
@@ -86,7 +86,7 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
-#ifdef CONFIG_EVENT_TRACING
+#ifdef TRACEPOINTS_ENABLED
#include <trace/trace_events.h>
#include <trace/perf.h>
#endif
......
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpio
+#ifndef CONFIG_TRACING_EVENTS_GPIO
+#define NOTRACE
+#endif
#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GPIO_H
......
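The two added lines follow the NOTRACE convention introduced in
tracepoint.h above. For a hypothetical subsystem 'foo' with its own Kconfig
switch (mirroring the TRACING_EVENTS_GPIO entry added to
kernel/trace/Kconfig later in this merge), the complete pattern in the
subsystem's trace header would look like:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM foo

	#ifndef CONFIG_TRACING_EVENTS_FOO	/* hypothetical Kconfig switch */
	#define NOTRACE
	#endif

	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_FOO_H

	#include <linux/tracepoint.h>
	/* TRACE_EVENT(...) definitions for the subsystem go here */

	#endif /* _TRACE_FOO_H */

	/* This part must be outside protection */
	#include <trace/define_trace.h>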
@@ -506,3 +506,261 @@ static inline notrace int trace_event_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 4 of the trace events.
*
* Override the macros in <trace/trace_events.h> to include the following:
*
* For those macros defined with TRACE_EVENT:
*
* static struct trace_event_call event_<call>;
*
* static void trace_event_raw_event_<call>(void *__data, proto)
* {
* struct trace_event_file *trace_file = __data;
* struct trace_event_call *event_call = trace_file->event_call;
* struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
* unsigned long eflags = trace_file->flags;
* enum event_trigger_type __tt = ETT_NONE;
* struct ring_buffer_event *event;
* struct trace_event_raw_<call> *entry; <-- defined in stage 1
* struct ring_buffer *buffer;
* unsigned long irq_flags;
* int __data_size;
* int pc;
*
* if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
* if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
* event_triggers_call(trace_file, NULL);
* if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
* return;
* }
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
* __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
*
* event = trace_event_buffer_lock_reserve(&buffer, trace_file,
* event_<call>->event.type,
* sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
*
* { <assign>; } <-- Here we assign the entries by the __field and
* __array macros.
*
* if (eflags & EVENT_FILE_FL_TRIGGER_COND)
* __tt = event_triggers_call(trace_file, entry);
*
* if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
* &trace_file->flags))
* ring_buffer_discard_commit(buffer, event);
* else if (!filter_check_discard(trace_file, entry, buffer, event))
* trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
*
* if (__tt)
* event_triggers_post_call(trace_file, __tt);
* }
*
* static struct trace_event ftrace_event_type_<call> = {
* .trace = trace_raw_output_<call>, <-- stage 2
* };
*
* static char print_fmt_<call>[] = <TP_printk>;
*
* static struct trace_event_class __used event_class_<template> = {
* .system = "<system>",
* .define_fields = trace_event_define_fields_<call>,
* .fields = LIST_HEAD_INIT(event_class_##call.fields),
* .raw_init = trace_event_raw_init,
* .probe = trace_event_raw_event_##call,
* .reg = trace_event_reg,
* };
*
* static struct trace_event_call event_<call> = {
* .class = event_class_<template>,
* {
* .tp = &__tracepoint_<call>,
* },
* .event = &ftrace_event_type_<call>,
* .print_fmt = print_fmt_<call>,
* .flags = TRACE_EVENT_FL_TRACEPOINT,
* };
* // it's only safe to use pointers when doing linker tricks to
* // create an array.
* static struct trace_event_call __used
* __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
*
*/
#ifdef CONFIG_PERF_EVENTS
#define _TRACE_PERF_PROTO(call, proto) \
static notrace void \
perf_trace_##call(void *__data, proto);
#define _TRACE_PERF_INIT(call) \
.perf_probe = perf_trace_##call,
#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
#undef __entry
#define __entry entry
#undef __field
#define __field(type, item)
#undef __field_struct
#define __field_struct(type, item)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__entry->__data_loc_##item = __data_offsets.item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __assign_str
#define __assign_str(dst, src) \
strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits) \
memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
#undef TP_fast_assign
#define TP_fast_assign(args...) args
#undef __perf_addr
#define __perf_addr(a) (a)
#undef __perf_count
#define __perf_count(c) (c)
#undef __perf_task
#define __perf_task(t) (t)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static notrace void \
trace_event_raw_event_##call(void *__data, proto) \
{ \
struct trace_event_file *trace_file = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
struct trace_event_buffer fbuffer; \
struct trace_event_raw_##call *entry; \
int __data_size; \
\
if (trace_trigger_soft_disabled(trace_file)) \
return; \
\
__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
sizeof(*entry) + __data_size); \
\
if (!entry) \
return; \
\
tstruct \
\
{ assign; } \
\
trace_event_buffer_commit(&fbuffer); \
}
/*
* The ftrace_test_probe is compiled out; it is only here as a build-time check
* to make sure that if the tracepoint handling changes, the ftrace probe will
* fail to compile unless it too is updated.
*/
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void ftrace_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(trace_event_raw_event_##template); \
}
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __entry
#define __entry REC
#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static char print_fmt_##call[] = print; \
static struct trace_event_class __used __refdata event_class_##call = { \
.system = TRACE_SYSTEM_STRING, \
.define_fields = trace_event_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
.raw_init = trace_event_raw_init, \
.probe = trace_event_raw_event_##call, \
.reg = trace_event_reg, \
_TRACE_PERF_INIT(call) \
};
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct trace_event_call __used event_##call = { \
.class = &event_class_##template, \
{ \
.tp = &__tracepoint_##call, \
}, \
.event.funcs = &trace_event_type_funcs_##template, \
.print_fmt = print_fmt_##template, \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static char print_fmt_##call[] = print; \
\
static struct trace_event_call __used event_##call = { \
.class = &event_class_##template, \
{ \
.tp = &__tracepoint_##call, \
}, \
.event.funcs = &trace_event_type_funcs_##call, \
.print_fmt = print_fmt_##call, \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
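For context, here is a minimal TRACE_EVENT() definition that the stage-4
machinery above expands; the event and its fields are hypothetical, while
the macro API is the standard one from <linux/tracepoint.h>.
DECLARE_EVENT_CLASS above turns it into trace_event_raw_event_sample_event(),
which now bails out early via trace_trigger_soft_disabled() and therefore
also honors the new PID filter:

	#include <linux/tracepoint.h>

	TRACE_EVENT(sample_event,

		TP_PROTO(int value, const char *name),

		TP_ARGS(value, name),

		TP_STRUCT__entry(
			__field(int, value)
			__string(name, name)
		),

		TP_fast_assign(
			__entry->value = value;
			__assign_str(name, name);
		),

		TP_printk("value=%d name=%s", __entry->value, __get_str(name))
	);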
@@ -635,6 +635,13 @@ config TRACE_ENUM_MAP_FILE
	  If unsure, say N
+config TRACING_EVENTS_GPIO
+	bool "Trace gpio events"
+	depends on GPIOLIB
+	default y
+	help
+	  Enable tracing events for gpio subsystem
endif # FTRACE
endif # TRACING_SUPPORT
......
@@ -103,7 +103,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		memcpy((void *) t + sizeof(*t), data, len);
		if (blk_tracer)
-			trace_buffer_unlock_commit(buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}
@@ -278,7 +278,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
		if (blk_tracer) {
-			trace_buffer_unlock_commit(buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}
@@ -1340,6 +1340,7 @@ static const struct {
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
+	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
@@ -1348,7 +1349,7 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
-	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
+	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	if (t->action == BLK_TN_MESSAGE) {
@@ -1410,9 +1411,9 @@ blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
-			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
+			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
-			trace_flags |= TRACE_ITER_CONTEXT_INFO;
+			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}
......
This diff is collapsed.
@@ -829,7 +829,7 @@ rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
-static int rb_is_reader_page(struct buffer_page *page)
+static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;
@@ -2270,7 +2270,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
	return skip_time_extend(event);
}
-static inline int rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
+static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
				      struct ring_buffer_event *event);
/**
@@ -2498,7 +2498,7 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
		event->time_delta = 1;
}
-static inline int
+static inline bool
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
@@ -3039,7 +3039,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
-static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
+static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
@@ -3047,7 +3047,7 @@ static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
	/* In case of error, head will be NULL */
	if (unlikely(!head))
-		return 1;
+		return true;
	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
@@ -4267,7 +4267,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
-int ring_buffer_empty(struct ring_buffer *buffer)
+bool ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
@@ -4285,10 +4285,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
		local_irq_restore(flags);
		if (!ret)
-			return 0;
+			return false;
	}
-	return 1;
+	return true;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -4297,7 +4297,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
-int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
@@ -4305,7 +4305,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
	int ret;
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return 1;
+		return true;
	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
......
@@ -24,8 +24,8 @@ struct rb_page {
static int wakeup_interval = 100;
static int reader_finish;
-static struct completion read_start;
-static struct completion read_done;
+static DECLARE_COMPLETION(read_start);
+static DECLARE_COMPLETION(read_done);
static struct ring_buffer *buffer;
static struct task_struct *producer;
@@ -60,12 +60,12 @@ MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
static int read_events;
-static int kill_test;
+static int test_error;
-#define KILL_TEST()				\
+#define TEST_ERROR()				\
	do {					\
-		if (!kill_test) {		\
-			kill_test = 1;		\
+		if (!test_error) {		\
+			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)
@@ -75,6 +75,11 @@ enum event_status {
	EVENT_DROPPED,
};
+static bool break_test(void)
+{
+	return test_error || kthread_should_stop();
+}
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
@@ -87,7 +92,7 @@ static enum event_status read_event(int cpu)
	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
-		KILL_TEST();
+		TEST_ERROR();
		return EVENT_DROPPED;
	}
@@ -115,10 +120,10 @@ static enum event_status read_page(int cpu)
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
-		for (i = 0; i < commit && !kill_test; i += inc) {
+		for (i = 0; i < commit && !test_error ; i += inc) {
			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
-				KILL_TEST();
+				TEST_ERROR();
				break;
			}
@@ -128,7 +133,7 @@ static enum event_status read_page(int cpu)
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
-					KILL_TEST();
+					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
@@ -137,12 +142,12 @@ static enum event_status read_page(int cpu)
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
-					KILL_TEST();
+					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
-					KILL_TEST();
+					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
@@ -150,17 +155,17 @@ static enum event_status read_page(int cpu)
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
-					KILL_TEST();
+					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
-			if (kill_test)
+			if (test_error)
				break;
			if (inc <= 0) {
-				KILL_TEST();
+				TEST_ERROR();
				break;
			}
		}
@@ -178,10 +183,14 @@ static void ring_buffer_consumer(void)
	read_events ^= 1;
	read = 0;
-	while (!reader_finish && !kill_test) {
-		int found;
-		do {
+	/*
+	 * Continue running until the producer specifically asks to stop
+	 * and is ready for the completion.
+	 */
+	while (!READ_ONCE(reader_finish)) {
+		int found = 1;
+		while (found && !test_error) {
			int cpu;
			found = 0;
@@ -193,19 +202,25 @@ static void ring_buffer_consumer(void)
			else
				stat = read_page(cpu);
-			if (kill_test)
+			if (test_error)
				break;
			if (stat == EVENT_FOUND)
				found = 1;
+			}
		}
-		} while (found && !kill_test);
+		/* Wait till the producer wakes us up when there is more data
+		 * available or when the producer wants us to finish reading.
+		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;
		schedule();
	}
+	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}
@@ -263,10 +278,7 @@ static void ring_buffer_producer(void)
		if (cnt % wakeup_interval)
			cond_resched();
#endif
-		if (kthread_should_stop())
-			kill_test = 1;
-	} while (ktime_before(end_time, timeout) && !kill_test);
+	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");
	if (consumer) {
@@ -276,8 +288,6 @@ static void ring_buffer_producer(void)
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
-		/* finish var visible before waking up the consumer */
-		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}
@@ -287,7 +297,7 @@ static void ring_buffer_producer(void)
	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);
-	if (kill_test && !kthread_should_stop())
+	if (test_error)
		trace_printk("ERROR!\n");
	if (!disable_reader) {
@@ -368,15 +378,14 @@ static void wait_to_die(void)
static int ring_buffer_consumer_thread(void *arg)
{
-	while (!kthread_should_stop() && !kill_test) {
+	while (!break_test()) {
		complete(&read_start);
		ring_buffer_consumer();
		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop() || kill_test)
+		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
@@ -389,27 +398,27 @@ static int ring_buffer_consumer_thread(void *arg)
static int ring_buffer_producer_thread(void *arg)
{
-	init_completion(&read_start);
-	while (!kthread_should_stop() && !kill_test) {
+	while (!break_test()) {
		ring_buffer_reset(buffer);
		if (consumer) {
-			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}
		ring_buffer_producer();
-		if (kill_test)
+		if (break_test())
			goto out_kill;
		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
+		if (break_test())
+			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}
out_kill:
+	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();
......
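Distilled from the changes above, the shutdown handshake the benchmark
converges on is a small pattern worth seeing on its own (a sketch reusing
the module-local names; memory-ordering details beyond READ_ONCE() are
elided):

	static int reader_finish;
	static DECLARE_COMPLETION(read_done);

	static void producer_stop_consumer(struct task_struct *consumer)
	{
		reader_finish = 1;
		wake_up_process(consumer);	 /* kick the sleeping consumer */
		wait_for_completion(&read_done); /* wait for it to acknowledge */
	}

	static void consumer_loop(void)
	{
		/* READ_ONCE() ensures the loop cannot miss the store */
		while (!READ_ONCE(reader_finish)) {
			/* ... consume available events ... */
			set_current_state(TASK_INTERRUPTIBLE);
			if (reader_finish)
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		reader_finish = 0;
		complete(&read_done);		 /* acknowledge the stop */
	}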
This diff is collapsed.
@@ -71,9 +71,6 @@ enum trace_type {
		tstruct				\
	}
-#undef TP_ARGS
-#define TP_ARGS(args...)	args
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
@@ -156,9 +153,12 @@ struct trace_array_cpu {
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
+	bool			ignore_pid;
};
struct tracer;
+struct trace_option_dentry;
struct trace_buffer {
	struct trace_array		*tr;
@@ -168,6 +168,19 @@ struct trace_buffer {
	int				cpu;
};
+#define TRACE_FLAGS_MAX_SIZE		32
+struct trace_options {
+	struct tracer			*tracer;
+	struct trace_option_dentry	*topts;
+};
+struct trace_pid_list {
+	unsigned int			nr_pids;
+	int				order;
+	pid_t				*pids;
+};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 */
@@ -193,6 +206,7 @@ struct trace_array {
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
+	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
@@ -216,13 +230,17 @@ struct trace_array {
#endif
	int			stop_count;
	int			clock_id;
+	int			nr_topts;
	struct tracer		*current_trace;
+	unsigned int		trace_flags;
+	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
+	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
@@ -333,6 +351,13 @@ struct tracer_flags {
#define TRACER_OPT(s, b)	.name = #s, .bit = b
+struct trace_option_dentry {
+	struct tracer_opt		*opt;
+	struct tracer_flags		*flags;
+	struct trace_array		*tr;
+	struct dentry			*entry;
+};
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
@@ -611,29 +636,12 @@ void update_max_tr_single(struct trace_array *tr,
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
-void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
-			int skip, int pc);
-void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
-			     int skip, int pc, struct pt_regs *regs);
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
-static inline void ftrace_trace_stack(struct ring_buffer *buffer,
-				      unsigned long flags, int skip, int pc)
-{
-}
-static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
-					   unsigned long flags, int skip,
-					   int pc, struct pt_regs *regs)
-{
-}
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
@@ -707,8 +715,6 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);
-extern unsigned long trace_flags;
extern char trace_find_mark(unsigned long long duration);
/* Standard output formatting function used for function return traces */
@@ -723,9 +729,14 @@ extern char trace_find_mark(unsigned long long duration);
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
+#define TRACE_GRAPH_SLEEP_TIME		0x100
+#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
+extern void ftrace_graph_sleep_time_control(bool enable);
+extern void ftrace_graph_graph_time_control(bool enable);
extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
@@ -859,7 +870,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
-int ftrace_event_is_function(struct trace_event_call *call);
+bool ftrace_event_is_function(struct trace_event_call *call);
/*
 * struct trace_parser - serves for reading the user input separated by spaces
@@ -896,42 +907,94 @@ extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
/*
* Only create function graph options if function graph is configured.
*/
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS \
C(DISPLAY_GRAPH, "display-graph"),
#else
# define FGRAPH_FLAGS
#endif
#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS \
C(BRANCH, "branch"),
#else
# define BRANCH_FLAGS
#endif
#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS \
C(FUNCTION, "function-trace"),
# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS 0UL
#endif
#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS \
C(STACKTRACE, "stacktrace"),
#else
# define STACK_FLAGS
#endif
/* /*
* trace_iterator_flags is an enumeration that defines bit * trace_iterator_flags is an enumeration that defines bit
* positions into trace_flags that controls the output. * positions into trace_flags that controls the output.
* *
* NOTE: These bits must match the trace_options array in * NOTE: These bits must match the trace_options array in
* trace.c. * trace.c (this macro guarantees it).
*/
#define TRACE_FLAGS \
C(PRINT_PARENT, "print-parent"), \
C(SYM_OFFSET, "sym-offset"), \
C(SYM_ADDR, "sym-addr"), \
C(VERBOSE, "verbose"), \
C(RAW, "raw"), \
C(HEX, "hex"), \
C(BIN, "bin"), \
C(BLOCK, "block"), \
C(PRINTK, "trace_printk"), \
C(ANNOTATE, "annotate"), \
C(USERSTACKTRACE, "userstacktrace"), \
C(SYM_USEROBJ, "sym-userobj"), \
C(PRINTK_MSGONLY, "printk-msg-only"), \
C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
C(LATENCY_FMT, "latency-format"), \
C(RECORD_CMD, "record-cmd"), \
C(OVERWRITE, "overwrite"), \
C(STOP_ON_FREE, "disable_on_free"), \
C(IRQ_INFO, "irq-info"), \
C(MARKERS, "markers"), \
FUNCTION_FLAGS \
FGRAPH_FLAGS \
STACK_FLAGS \
BRANCH_FLAGS
/*
* By defining C, we can make TRACE_FLAGS a list of bit names
* that will define the bits for the flag masks.
*/ */
enum trace_iterator_flags { #undef C
TRACE_ITER_PRINT_PARENT = 0x01, #define C(a, b) TRACE_ITER_##a##_BIT
TRACE_ITER_SYM_OFFSET = 0x02,
TRACE_ITER_SYM_ADDR = 0x04, enum trace_iterator_bits {
TRACE_ITER_VERBOSE = 0x08, TRACE_FLAGS
TRACE_ITER_RAW = 0x10, /* Make sure we don't go more than we have bits for */
TRACE_ITER_HEX = 0x20, TRACE_ITER_LAST_BIT
TRACE_ITER_BIN = 0x40,
TRACE_ITER_BLOCK = 0x80,
TRACE_ITER_STACKTRACE = 0x100,
TRACE_ITER_PRINTK = 0x200,
TRACE_ITER_PREEMPTONLY = 0x400,
TRACE_ITER_BRANCH = 0x800,
TRACE_ITER_ANNOTATE = 0x1000,
TRACE_ITER_USERSTACKTRACE = 0x2000,
TRACE_ITER_SYM_USEROBJ = 0x4000,
TRACE_ITER_PRINTK_MSGONLY = 0x8000,
TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
TRACE_ITER_LATENCY_FMT = 0x20000,
TRACE_ITER_SLEEP_TIME = 0x40000,
TRACE_ITER_GRAPH_TIME = 0x80000,
TRACE_ITER_RECORD_CMD = 0x100000,
TRACE_ITER_OVERWRITE = 0x200000,
TRACE_ITER_STOP_ON_FREE = 0x400000,
TRACE_ITER_IRQ_INFO = 0x800000,
TRACE_ITER_MARKERS = 0x1000000,
TRACE_ITER_FUNCTION = 0x2000000,
}; };
/*
* By redefining C, we can make TRACE_FLAGS a list of masks that
* use the bits as defined above.
*/
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
enum trace_iterator_flags { TRACE_FLAGS };
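The C() trick above is a classic X-macro: one master list (TRACE_FLAGS) is expanded several times with different definitions of C() to generate the bit-position enum, the mask enum, and (in trace.c) the option-name table, so the three can never drift out of sync. A minimal standalone sketch of the same technique; all names here are illustrative, not the kernel's:

#include <stdio.h>

/* One master list; C() is redefined before each expansion. */
#define FLAGS				\
		C(ALPHA,	"alpha"),	\
		C(BETA,		"beta"),	\
		C(GAMMA,	"gamma"),

/* First expansion: bit positions 0, 1, 2, ... */
#undef C
#define C(a, b) FLAG_##a##_BIT
enum flag_bits { FLAGS FLAG_LAST_BIT };

/* Second expansion: masks built from the bit positions. */
#undef C
#define C(a, b) FLAG_##a = (1 << FLAG_##a##_BIT)
enum flag_masks { FLAGS };

/* Third expansion: user-visible names, guaranteed same order. */
#undef C
#define C(a, b) b
static const char *flag_names[] = { FLAGS };

int main(void)
{
	unsigned int flags = FLAG_ALPHA | FLAG_GAMMA;

	for (int i = 0; i < FLAG_LAST_BIT; i++)
		printf("%-8s %s\n", flag_names[i],
		       (flags & (1u << i)) ? "on" : "off");
	return 0;
}

The conditional sublists (FUNCTION_FLAGS, FGRAPH_FLAGS, STACK_FLAGS, BRANCH_FLAGS) expand to nothing when the corresponding tracer is not configured, so disabled options simply vanish from every generated table.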
 /*
  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
  * control the output of kernel symbols.
@@ -946,7 +1009,7 @@ extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);
 static inline int trace_branch_enable(struct trace_array *tr)
 {
-	if (trace_flags & TRACE_ITER_BRANCH)
+	if (tr->trace_flags & TRACE_ITER_BRANCH)
 		return enable_branch_tracing(tr);
 	return 0;
 }
@@ -1269,6 +1332,7 @@ extern const char *__stop___trace_bprintk_fmt[];
 extern const char *__start___tracepoint_str[];
 extern const char *__stop___tracepoint_str[];
 
+void trace_printk_control(bool enabled);
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
...
@@ -43,7 +43,7 @@ static void trace_do_benchmark(void)
 	unsigned int std = 0;
 
 	/* Only run if the tracepoint is actually active */
-	if (!trace_benchmark_event_enabled())
+	if (!trace_benchmark_event_enabled() || !tracing_is_on())
 		return;
 
 	local_irq_disable();
...
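The added `!tracing_is_on()` test keeps the benchmark from burning CPU computing statistics for events the ring buffer would drop anyway. A hedged standalone sketch of the guard's shape, with the two kernel predicates replaced by plain booleans:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for trace_benchmark_event_enabled() and tracing_is_on(). */
static bool event_enabled = true;
static bool tracing_on;	/* buffer disabled: the work should be skipped */

static void do_benchmark(void)
{
	/* Only run if the event is active AND the buffer accepts writes */
	if (!event_enabled || !tracing_on) {
		puts("skipped: nothing would be recorded");
		return;
	}
	puts("running benchmark iteration");
}

int main(void)
{
	do_benchmark();		/* skipped */
	tracing_on = true;
	do_benchmark();		/* runs */
	return 0;
}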
@@ -125,25 +125,14 @@ void disable_branch_tracing(void)
 	mutex_unlock(&branch_tracing_mutex);
 }
 
-static void start_branch_trace(struct trace_array *tr)
-{
-	enable_branch_tracing(tr);
-}
-
-static void stop_branch_trace(struct trace_array *tr)
-{
-	disable_branch_tracing();
-}
-
 static int branch_trace_init(struct trace_array *tr)
 {
-	start_branch_trace(tr);
-	return 0;
+	return enable_branch_tracing(tr);
 }
 
 static void branch_trace_reset(struct trace_array *tr)
 {
-	stop_branch_trace(tr);
+	disable_branch_tracing();
 }
 
 static enum print_line_t trace_branch_print(struct trace_iterator *iter,
...
@@ -973,15 +973,15 @@ static bool is_string_field(struct ftrace_event_field *field)
 	       field->filter_type == FILTER_PTR_STRING;
 }
 
-static int is_legal_op(struct ftrace_event_field *field, int op)
+static bool is_legal_op(struct ftrace_event_field *field, int op)
 {
 	if (is_string_field(field) &&
 	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
-		return 0;
+		return false;
 	if (!is_string_field(field) && op == OP_GLOB)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
...
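Beyond the int-to-bool conversion, the predicate encodes a simple filter rule: string fields accept only ==, != and glob matching, and glob is only valid on string fields. A standalone replica with a small test (the enum values are illustrative, not the kernel's):

#include <stdbool.h>
#include <assert.h>

enum ops { OP_EQ, OP_NE, OP_GLOB, OP_LT, OP_GT };

static bool is_legal_op(bool is_string_field, enum ops op)
{
	/* strings: only ==, != and glob make sense */
	if (is_string_field && (op != OP_EQ && op != OP_NE && op != OP_GLOB))
		return false;
	/* numbers: glob matching is meaningless */
	if (!is_string_field && op == OP_GLOB)
		return false;
	return true;
}

int main(void)
{
	assert(is_legal_op(true, OP_GLOB));	/* comm ~ "sh*" is fine */
	assert(!is_legal_op(true, OP_LT));	/* no ordering on strings */
	assert(!is_legal_op(false, OP_GLOB));	/* no glob on integers */
	assert(is_legal_op(false, OP_LT));
	return 0;
}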
@@ -187,7 +187,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 	FTRACE_ENTRY_REG(call, struct_name, etype,			\
 			 PARAMS(tstruct), PARAMS(print), filter, NULL)
 
-int ftrace_event_is_function(struct trace_event_call *call)
+bool ftrace_event_is_function(struct trace_event_call *call)
 {
 	return call == &event_function;
 }
...
@@ -83,13 +83,18 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
 	/* Display function name after trailing } */
 	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
+	/* Include sleep time (scheduled out) between entry and return */
+	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
+	/* Include time within nested functions */
+	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
 	/* Don't display overruns, proc, or tail by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
-	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
+	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
+	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
 	.opts = trace_opts
 };
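The two new TRACER_OPT entries expose existing fgraph time accounting as visible toggles, and OR-ing TRACE_GRAPH_SLEEP_TIME and TRACE_GRAPH_GRAPH_TIME into .val preserves the old behavior as the default. A hedged sketch of the same name-to-bitmask option-table pattern, with simplified stand-in types:

#include <stdio.h>
#include <string.h>

struct tracer_opt {
	const char *name;	/* option name shown under options/ */
	unsigned int bit;	/* flag bit it controls */
};

#define OPT_SLEEP_TIME	0x100
#define OPT_GRAPH_TIME	0x200

static const struct tracer_opt opts[] = {
	{ "sleep-time", OPT_SLEEP_TIME },
	{ "graph-time", OPT_GRAPH_TIME },
	{ NULL, 0 }		/* empty entry terminates the table */
};

/* defaults: both forms of time accounting enabled */
static unsigned int val = OPT_SLEEP_TIME | OPT_GRAPH_TIME;

static int set_opt(const char *name, int set)
{
	for (const struct tracer_opt *o = opts; o->name; o++) {
		if (strcmp(o->name, name))
			continue;
		if (set)
			val |= o->bit;
		else
			val &= ~o->bit;
		return 0;
	}
	return -1;	/* unknown option */
}

int main(void)
{
	set_opt("sleep-time", 0);	/* echo 0 > options/sleep-time */
	printf("sleep-time %s, graph-time %s\n",
	       (val & OPT_SLEEP_TIME) ? "on" : "off",
	       (val & OPT_GRAPH_TIME) ? "on" : "off");
	return 0;
}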
@@ -107,8 +112,8 @@ enum {
 };
 
 static void
-print_graph_duration(unsigned long long duration, struct trace_seq *s,
-		     u32 flags);
+print_graph_duration(struct trace_array *tr, unsigned long long duration,
+		     struct trace_seq *s, u32 flags);
 
 /* Add a function return address to the trace stack on thread info.*/
 int
@@ -653,6 +658,7 @@ static void
 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		enum trace_type type, int cpu, pid_t pid, u32 flags)
 {
+	struct trace_array *tr = iter->tr;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *ent = iter->ent;
@@ -660,7 +666,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 	    addr >= (unsigned long)__irqentry_text_end)
 		return;
 
-	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 		/* Absolute time */
 		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 			print_graph_abs_time(iter->ts, s);
@@ -676,19 +682,19 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		}
 
 		/* Latency format */
-		if (trace_flags & TRACE_ITER_LATENCY_FMT)
+		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 			print_graph_lat_fmt(s, ent);
 	}
 
 	/* No overhead */
-	print_graph_duration(0, s, flags | FLAGS_FILL_START);
+	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 
 	if (type == TRACE_GRAPH_ENT)
 		trace_seq_puts(s, "==========>");
 	else
 		trace_seq_puts(s, "<==========");
 
-	print_graph_duration(0, s, flags | FLAGS_FILL_END);
+	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 	trace_seq_putc(s, '\n');
 }
@@ -726,11 +732,11 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 }
 
 static void
-print_graph_duration(unsigned long long duration, struct trace_seq *s,
-		     u32 flags)
+print_graph_duration(struct trace_array *tr, unsigned long long duration,
+		     struct trace_seq *s, u32 flags)
 {
 	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
-	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
+	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 		return;
 
 	/* No real adata, just filling the column with spaces */
@@ -764,6 +770,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 		       struct trace_seq *s, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
+	struct trace_array *tr = iter->tr;
 	struct ftrace_graph_ret *graph_ret;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
@@ -792,7 +799,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	}
 
 	/* Overhead and duration */
-	print_graph_duration(duration, s, flags);
+	print_graph_duration(tr, duration, s, flags);
 
 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
@@ -810,6 +817,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 {
 	struct ftrace_graph_ent *call = &entry->graph_ent;
 	struct fgraph_data *data = iter->private;
+	struct trace_array *tr = iter->tr;
 	int i;
 
 	if (data) {
@@ -825,7 +833,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	}
 
 	/* No time */
-	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
+	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 
 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
@@ -849,6 +857,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 {
 	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
+	struct trace_array *tr = iter->tr;
 	int cpu = iter->cpu;
 
 	/* Pid */
@@ -858,7 +867,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 	/* Interrupt */
 	print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 
-	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 		return;
 
 	/* Absolute time */
@@ -876,7 +885,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 	}
 
 	/* Latency format */
-	if (trace_flags & TRACE_ITER_LATENCY_FMT)
+	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 		print_graph_lat_fmt(s, ent);
 
 	return;
@@ -1027,6 +1036,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 {
 	unsigned long long duration = trace->rettime - trace->calltime;
 	struct fgraph_data *data = iter->private;
+	struct trace_array *tr = iter->tr;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
 	int func_match = 1;
@@ -1058,7 +1068,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	print_graph_prologue(iter, s, 0, 0, flags);
 
 	/* Overhead and duration */
-	print_graph_duration(duration, s, flags);
+	print_graph_duration(tr, duration, s, flags);
 
 	/* Closing brace */
 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
@@ -1091,7 +1101,8 @@ static enum print_line_t
 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 		    struct trace_iterator *iter, u32 flags)
 {
-	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+	struct trace_array *tr = iter->tr;
+	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
 	struct fgraph_data *data = iter->private;
 	struct trace_event *event;
 	int depth = 0;
@@ -1104,7 +1115,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 	print_graph_prologue(iter, s, 0, 0, flags);
 
 	/* No time */
-	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
+	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 
 	/* Indentation */
 	if (depth > 0)
@@ -1245,9 +1256,10 @@ static void print_lat_header(struct seq_file *s, u32 flags)
 	seq_printf(s, "#%.*s||| / \n", size, spaces);
 }
 
-static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
+static void __print_graph_headers_flags(struct trace_array *tr,
+					struct seq_file *s, u32 flags)
 {
-	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
+	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
 
 	if (lat)
 		print_lat_header(s, flags);
@@ -1289,11 +1301,12 @@ static void print_graph_headers(struct seq_file *s)
 void print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
 	struct trace_iterator *iter = s->private;
+	struct trace_array *tr = iter->tr;
 
-	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 		return;
 
-	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
 		/* print nothing if the buffers are empty */
 		if (trace_empty(iter))
 			return;
@@ -1301,7 +1314,7 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
 		print_trace_header(s, iter);
 	}
 
-	__print_graph_headers_flags(s, flags);
+	__print_graph_headers_flags(tr, s, flags);
 }
 
 void graph_trace_open(struct trace_iterator *iter)
@@ -1362,6 +1375,12 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (bit == TRACE_GRAPH_PRINT_IRQS)
 		ftrace_graph_skip_irqs = !set;
 
+	if (bit == TRACE_GRAPH_SLEEP_TIME)
+		ftrace_graph_sleep_time_control(set);
+
+	if (bit == TRACE_GRAPH_GRAPH_TIME)
+		ftrace_graph_graph_time_control(set);
+
 	return 0;
 }
...
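func_graph_set_flag is the hook that runs when one of this tracer's option bits flips; the new cases simply forward the sleep-time and graph-time toggles to the fgraph core. A minimal sketch of that dispatch shape, with the control functions reduced to dummies:

#include <stdio.h>

#define OPT_SLEEP_TIME	0x100
#define OPT_GRAPH_TIME	0x200

static void sleep_time_control(int enable)
{
	printf("sleep-time accounting %s\n", enable ? "on" : "off");
}

static void graph_time_control(int enable)
{
	printf("graph-time accounting %s\n", enable ? "on" : "off");
}

/* called by the options code after a bit in the flag word changes */
static int set_flag(unsigned int bit, int set)
{
	if (bit == OPT_SLEEP_TIME)
		sleep_time_control(set);

	if (bit == OPT_GRAPH_TIME)
		graph_time_control(set);

	return 0;
}

int main(void)
{
	set_flag(OPT_SLEEP_TIME, 0);	/* echo 0 > options/sleep-time */
	set_flag(OPT_GRAPH_TIME, 1);	/* echo 1 > options/graph-time */
	return 0;
}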
@@ -31,7 +31,6 @@ enum {
 static int trace_type __read_mostly;
 
 static int save_flags;
-static bool function_enabled;
 
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -57,22 +56,16 @@ irq_trace(void)
 # define irq_trace() (0)
 #endif
 
-#define TRACE_DISPLAY_GRAPH	1
-
-static struct tracer_opt trace_opts[] = {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* display latency trace as call graph */
-	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+static int irqsoff_display_graph(struct trace_array *tr, int set);
+# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
+#else
+static inline int irqsoff_display_graph(struct trace_array *tr, int set)
+{
+	return -EINVAL;
+}
+# define is_graph(tr) false
 #endif
-	{ } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-	.val  = 0,
-	.opts = trace_opts,
-};
-
-#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
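This hunk retires the tracer-private trace_opts/tracer_flags pair: display-graph becomes an ordinary trace option stored in the instance's own flag word, so is_graph() becomes is_graph(tr) and each trace instance can hold its own setting. A hedged sketch of moving a global flag into a per-instance struct (the bit value and names are illustrative):

#include <stdio.h>

#define ITER_DISPLAY_GRAPH (1u << 0)	/* illustrative bit value */

/* Before: one global flag word shared by every trace instance.
 * After: each instance carries its own copy. */
struct trace_array {
	const char *name;
	unsigned long trace_flags;
};

#define is_graph(tr) ((tr)->trace_flags & ITER_DISPLAY_GRAPH)

int main(void)
{
	struct trace_array top  = { "top",           ITER_DISPLAY_GRAPH };
	struct trace_array inst = { "instances/foo", 0 };

	/* The same option can now differ per instance. */
	printf("%s: %s\n", top.name,  is_graph(&top)  ? "graph" : "flat");
	printf("%s: %s\n", inst.name, is_graph(&inst) ? "graph" : "flat");
	return 0;
}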
 /*
  * Sequence count - we record it when starting a measurement and
@@ -152,15 +145,11 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int
-irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
+static int irqsoff_display_graph(struct trace_array *tr, int set)
 {
 	int cpu;
 
-	if (!(bit & TRACE_DISPLAY_GRAPH))
-		return -EINVAL;
-
-	if (!(is_graph() ^ set))
+	if (!(is_graph(tr) ^ set))
 		return 0;
 
 	stop_irqsoff_tracer(irqsoff_trace, !set);
@@ -209,7 +198,7 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
 
 static void irqsoff_trace_open(struct trace_iterator *iter)
 {
-	if (is_graph())
+	if (is_graph(iter->tr))
 		graph_trace_open(iter);
 }
@@ -231,7 +220,7 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 	 * In graph mode call the graph tracer output function,
 	 * otherwise go with the TRACE_FN event handler
 	 */
-	if (is_graph())
+	if (is_graph(iter->tr))
 		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
 
 	return TRACE_TYPE_UNHANDLED;
@@ -239,7 +228,9 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 
 static void irqsoff_print_header(struct seq_file *s)
 {
-	if (is_graph())
+	struct trace_array *tr = irqsoff_trace;
+
+	if (is_graph(tr))
 		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
 	else
 		trace_default_header(s);
@@ -250,7 +241,7 @@ __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
 		 unsigned long flags, int pc)
 {
-	if (is_graph())
+	if (is_graph(tr))
 		trace_graph_function(tr, ip, parent_ip, flags, pc);
 	else
 		trace_function(tr, ip, parent_ip, flags, pc);
@@ -259,27 +250,23 @@ __trace_function(struct trace_array *tr,
 #else
 #define __trace_function trace_function
 
-static int
-irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
-{
-	return -EINVAL;
-}
-
+#ifdef CONFIG_FUNCTION_TRACER
 static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
 {
 	return -1;
 }
+#endif
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
 
-static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
 static void irqsoff_trace_open(struct trace_iterator *iter) { }
 static void irqsoff_trace_close(struct trace_iterator *iter) { }
 
 #ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
 static void irqsoff_print_header(struct seq_file *s)
 {
 	trace_default_header(s);
@@ -295,16 +282,16 @@ static void irqsoff_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(struct trace_array *tr, cycle_t delta)
+static bool report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
-			return 0;
+			return false;
 	} else {
 		if (delta <= tr->max_latency)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
 }
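report_latency encodes the recording policy of the latency tracers: when tracing_thresh is set, every violation at or above the threshold is reported; otherwise only a new maximum beats the previous record. A standalone replica, with cycle_t approximated by unsigned long long:

#include <stdbool.h>
#include <assert.h>

typedef unsigned long long cycle_t;

static cycle_t tracing_thresh;		/* 0 means "track maxima" */
static cycle_t max_latency = 100;

static bool report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= max_latency)
			return false;
	}
	return true;
}

int main(void)
{
	assert(!report_latency(100));	/* ties the max: not recorded */
	assert(report_latency(101));	/* new max: record it */

	tracing_thresh = 50;		/* threshold mode */
	assert(report_latency(60));	/* above threshold: report */
	assert(!report_latency(40));
	return 0;
}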
 static void
@@ -523,12 +510,15 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
+#ifdef CONFIG_FUNCTION_TRACER
+static bool function_enabled;
+
 static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
 	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
-	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
 		return 0;
 
 	if (graph)
@@ -556,20 +546,40 @@ static void unregister_irqsoff_function(struct trace_array *tr, int graph)
 	function_enabled = false;
 }
 
-static void irqsoff_function_set(struct trace_array *tr, int set)
+static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
 {
+	if (!(mask & TRACE_ITER_FUNCTION))
+		return 0;
+
 	if (set)
-		register_irqsoff_function(tr, is_graph(), 1);
+		register_irqsoff_function(tr, is_graph(tr), 1);
 	else
-		unregister_irqsoff_function(tr, is_graph());
+		unregister_irqsoff_function(tr, is_graph(tr));
+
+	return 1;
+}
+#else
+static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
+{
+	return 0;
+}
+static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
+static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	return 0;
 }
+#endif /* CONFIG_FUNCTION_TRACER */
 
 static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
 	struct tracer *tracer = tr->current_trace;
 
-	if (mask & TRACE_ITER_FUNCTION)
-		irqsoff_function_set(tr, set);
+	if (irqsoff_function_set(tr, mask, set))
+		return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (mask & TRACE_ITER_DISPLAY_GRAPH)
+		return irqsoff_display_graph(tr, set);
+#endif
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
@@ -602,7 +612,7 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 	if (irqsoff_busy)
 		return -EBUSY;
 
-	save_flags = trace_flags;
+	save_flags = tr->trace_flags;
 
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
@@ -618,7 +628,7 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 	/* Only toplevel instance supports graph tracing */
 	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
-				      is_graph())))
+				      is_graph(tr))))
 		printk(KERN_ERR "failed to start irqsoff tracer\n");
 
 	irqsoff_busy = true;
@@ -630,7 +640,7 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
 
-	stop_irqsoff_tracer(tr, is_graph());
+	stop_irqsoff_tracer(tr, is_graph(tr));
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
@@ -666,8 +676,6 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.print_max	= true,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
-	.flags		= &tracer_flags,
-	.set_flag	= irqsoff_set_flag,
 	.flag_changed	= irqsoff_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_irqsoff,
@@ -700,8 +708,6 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.print_max	= true,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
-	.flags		= &tracer_flags,
-	.set_flag	= irqsoff_set_flag,
 	.flag_changed	= irqsoff_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_preemptoff,
@@ -736,8 +742,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.print_max	= true,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
-	.flags		= &tracer_flags,
-	.set_flag	= irqsoff_set_flag,
 	.flag_changed	= irqsoff_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_preemptirqsoff,
...
@@ -21,20 +21,22 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
+	struct trace_array *tr;
 	unsigned int old_userobj;
 	int cnt = 0, cpu;
 
 	trace_init_global_iter(&iter);
 	iter.buffer_iter = buffer_iter;
+	tr = iter.tr;
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
 	}
 
-	old_userobj = trace_flags;
+	old_userobj = tr->trace_flags;
 
 	/* don't look at user memory in panic mode */
-	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
 	kdb_printf("Dumping ftrace buffer:\n");
@@ -82,7 +84,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	kdb_printf("---------------------------------\n");
 
 out:
-	trace_flags = old_userobj;
+	tr->trace_flags = old_userobj;
 
 	for_each_tracing_cpu(cpu) {
 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
...
@@ -314,7 +314,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	entry->rw			= *rw;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -344,7 +344,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	entry->map			= *map;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
...
@@ -322,7 +322,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 # define IP_FMT "%016lx"
 #endif
 
-int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 		      unsigned long ip, unsigned long sym_flags)
 {
 	struct file *file = NULL;
@@ -354,50 +354,6 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 	return !trace_seq_has_overflowed(s);
 }
 
-int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
-		      unsigned long sym_flags)
-{
-	struct mm_struct *mm = NULL;
-	unsigned int i;
-
-	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
-		struct task_struct *task;
-		/*
-		 * we do the lookup on the thread group leader,
-		 * since individual threads might have already quit!
-		 */
-		rcu_read_lock();
-		task = find_task_by_vpid(entry->tgid);
-		if (task)
-			mm = get_task_mm(task);
-		rcu_read_unlock();
-	}
-
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		unsigned long ip = entry->caller[i];
-
-		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
-			break;
-
-		trace_seq_puts(s, " => ");
-
-		if (!ip) {
-			trace_seq_puts(s, "??");
-			trace_seq_putc(s, '\n');
-			continue;
-		}
-
-		seq_print_user_ip(s, mm, ip, sym_flags);
-		trace_seq_putc(s, '\n');
-	}
-
-	if (mm)
-		mmput(mm);
-
-	return !trace_seq_has_overflowed(s);
-}
-
 int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
@@ -520,7 +476,8 @@ char trace_find_mark(unsigned long long d)
 static int
 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
 {
-	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
+	struct trace_array *tr = iter->tr;
+	unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
 	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
 	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
 	unsigned long long rel_ts = next_ts - iter->ts;
@@ -563,6 +520,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
 
 int trace_print_context(struct trace_iterator *iter)
 {
+	struct trace_array *tr = iter->tr;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
 	unsigned long long t;
@@ -574,7 +532,7 @@ int trace_print_context(struct trace_iterator *iter)
 	trace_seq_printf(s, "%16s-%-5d [%03d] ",
 			 comm, entry->pid, iter->cpu);
 
-	if (trace_flags & TRACE_ITER_IRQ_INFO)
+	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
 		trace_print_lat_fmt(s, entry);
 
 	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
@@ -590,14 +548,15 @@ int trace_print_context(struct trace_iterator *iter)
 
 int trace_print_lat_context(struct trace_iterator *iter)
 {
-	u64 next_ts;
+	struct trace_array *tr = iter->tr;
 	/* trace_find_next_entry will reset ent_size */
 	int ent_size = iter->ent_size;
 	struct trace_seq *s = &iter->seq;
+	u64 next_ts;
 	struct trace_entry *entry = iter->ent,
 			   *next_entry = trace_find_next_entry(iter, NULL,
 							       &next_ts);
-	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
+	unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
 
 	/* Restore the original ent_size */
 	iter->ent_size = ent_size;
@@ -1079,13 +1038,49 @@ static struct trace_event trace_stack_event = {
 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 						int flags, struct trace_event *event)
 {
+	struct trace_array *tr = iter->tr;
 	struct userstack_entry *field;
 	struct trace_seq *s = &iter->seq;
+	struct mm_struct *mm = NULL;
+	unsigned int i;
 
 	trace_assign_type(field, iter->ent);
 
 	trace_seq_puts(s, "<user stack trace>\n");
-	seq_print_userip_objs(field, s, flags);
+
+	if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
+		struct task_struct *task;
+		/*
+		 * we do the lookup on the thread group leader,
+		 * since individual threads might have already quit!
+		 */
+		rcu_read_lock();
+		task = find_task_by_vpid(field->tgid);
+		if (task)
+			mm = get_task_mm(task);
+		rcu_read_unlock();
+	}
+
+	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+		unsigned long ip = field->caller[i];
+
+		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
+			break;
+
+		trace_seq_puts(s, " => ");
+
+		if (!ip) {
+			trace_seq_puts(s, "??");
+			trace_seq_putc(s, '\n');
+			continue;
+		}
+
+		seq_print_user_ip(s, mm, ip, flags);
+		trace_seq_putc(s, '\n');
+	}
+
+	if (mm)
+		mmput(mm);
 
 	return trace_handle_return(s);
 }
...
@@ -14,10 +14,6 @@ trace_print_printk_msg_only(struct trace_iterator *iter);
 extern int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
 		 unsigned long sym_flags);
-extern int seq_print_userip_objs(const struct userstack_entry *entry,
-				 struct trace_seq *s, unsigned long sym_flags);
-extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
-			     unsigned long ip, unsigned long sym_flags);
 
 extern int trace_print_context(struct trace_iterator *iter);
 extern int trace_print_lat_context(struct trace_iterator *iter);
...
@@ -178,6 +178,12 @@ static inline void format_mod_start(void) { }
 static inline void format_mod_stop(void) { }
 #endif /* CONFIG_MODULES */
 
+static bool __read_mostly trace_printk_enabled = true;
+
+void trace_printk_control(bool enabled)
+{
+	trace_printk_enabled = enabled;
+}
+
 __initdata_or_module static
 struct notifier_block module_trace_bprintk_format_nb = {
@@ -192,7 +198,7 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...)
 	if (unlikely(!fmt))
 		return 0;
 
-	if (!(trace_flags & TRACE_ITER_PRINTK))
+	if (!trace_printk_enabled)
 		return 0;
 
 	va_start(ap, fmt);
@@ -207,7 +213,7 @@ int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
 	if (unlikely(!fmt))
 		return 0;
 
-	if (!(trace_flags & TRACE_ITER_PRINTK))
+	if (!trace_printk_enabled)
 		return 0;
 
 	return trace_vbprintk(ip, fmt, ap);
@@ -219,7 +225,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...)
 	int ret;
 	va_list ap;
 
-	if (!(trace_flags & TRACE_ITER_PRINTK))
+	if (!trace_printk_enabled)
 		return 0;
 
 	va_start(ap, fmt);
@@ -231,7 +237,7 @@ EXPORT_SYMBOL_GPL(__trace_printk);
 
 int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 {
-	if (!(trace_flags & TRACE_ITER_PRINTK))
+	if (!trace_printk_enabled)
 		return 0;
 
 	return trace_vprintk(ip, fmt, ap);
...
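With trace_flags now per instance, the emit paths can no longer test one global word, so a dedicated boolean becomes the single kill switch that every trace_printk variant checks, flipped by trace_printk_control(). A sketch of the pattern with the tracing internals stubbed out (my_trace_printk and the vprintf backend are illustrative, not kernel APIs):

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* One switch, written by the option code, read by every emit path. */
static bool trace_printk_enabled = true;

void trace_printk_control(bool enabled)
{
	trace_printk_enabled = enabled;
}

int my_trace_printk(const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (!trace_printk_enabled)
		return 0;	/* cheap early-out on every call site */

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);	/* stands in for trace_vprintk() */
	va_end(ap);
	return ret;
}

int main(void)
{
	my_trace_printk("visible: %d\n", 1);
	trace_printk_control(false);	/* echo 0 > options/trace_printk */
	my_trace_printk("suppressed: %d\n", 2);
	return 0;
}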
@@ -302,15 +302,15 @@ static nokprobe_inline void call_fetch(struct fetch_param *fprm,
 }
 
 /* Check the name is good for event/group/fields */
-static inline int is_good_name(const char *name)
+static inline bool is_good_name(const char *name)
 {
 	if (!isalpha(*name) && *name != '_')
-		return 0;
+		return false;
 	while (*++name != '\0') {
 		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
 }
 
 static inline struct event_file_link *
...
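is_good_name enforces C-identifier rules ([A-Za-z_][A-Za-z0-9_]*) for probe event, group, and field names. A standalone replica probing the boundary cases (the unsigned char casts are added for strict ctype correctness in userspace):

#include <stdbool.h>
#include <ctype.h>
#include <assert.h>

/* Check the name is good for event/group/fields */
static bool is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return false;
	while (*++name != '\0') {
		if (!isalpha((unsigned char)*name) &&
		    !isdigit((unsigned char)*name) && *name != '_')
			return false;
	}
	return true;
}

int main(void)
{
	assert(is_good_name("myprobe_1"));
	assert(is_good_name("_hidden"));
	assert(!is_good_name("1st"));		/* cannot start with a digit */
	assert(!is_good_name("bad-name"));	/* '-' is rejected */
	return 0;
}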
@@ -34,31 +34,28 @@ static arch_spinlock_t wakeup_lock =
 static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
 
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
-
 static int save_flags;
-static bool function_enabled;
-
-#define TRACE_DISPLAY_GRAPH     1
 
-static struct tracer_opt trace_opts[] = {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* display latency trace as call graph */
-	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+static int wakeup_display_graph(struct trace_array *tr, int set);
+# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
+#else
+static inline int wakeup_display_graph(struct trace_array *tr, int set)
+{
+	return 0;
+}
+# define is_graph(tr) false
 #endif
-	{ } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-	.val  = 0,
-	.opts = trace_opts,
-};
-
-#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+
+static bool function_enabled;
+
 /*
  * Prologue for the wakeup function tracers.
  *
@@ -128,14 +125,13 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
-#endif /* CONFIG_FUNCTION_TRACER */
 
 static int register_wakeup_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
 	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
-	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
 		return 0;
 
 	if (graph)
@@ -163,20 +159,40 @@ static void unregister_wakeup_function(struct trace_array *tr, int graph)
 	function_enabled = false;
 }
 
-static void wakeup_function_set(struct trace_array *tr, int set)
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
 {
+	if (!(mask & TRACE_ITER_FUNCTION))
+		return 0;
+
 	if (set)
-		register_wakeup_function(tr, is_graph(), 1);
+		register_wakeup_function(tr, is_graph(tr), 1);
 	else
-		unregister_wakeup_function(tr, is_graph());
+		unregister_wakeup_function(tr, is_graph(tr));
+
+	return 1;
+}
+#else
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	return 0;
+}
+static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	return 0;
 }
+#endif /* CONFIG_FUNCTION_TRACER */
 
 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
 	struct tracer *tracer = tr->current_trace;
 
-	if (mask & TRACE_ITER_FUNCTION)
-		wakeup_function_set(tr, set);
+	if (wakeup_function_set(tr, mask, set))
+		return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (mask & TRACE_ITER_DISPLAY_GRAPH)
+		return wakeup_display_graph(tr, set);
+#endif
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
@@ -203,14 +219,9 @@ static void stop_func_tracer(struct trace_array *tr, int graph)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int
-wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
+static int wakeup_display_graph(struct trace_array *tr, int set)
 {
-	if (!(bit & TRACE_DISPLAY_GRAPH))
-		return -EINVAL;
-
-	if (!(is_graph() ^ set))
+	if (!(is_graph(tr) ^ set))
 		return 0;
 
 	stop_func_tracer(tr, !set);
@@ -259,7 +270,7 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
 
 static void wakeup_trace_open(struct trace_iterator *iter)
 {
-	if (is_graph())
+	if (is_graph(iter->tr))
 		graph_trace_open(iter);
 }
@@ -279,7 +290,7 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 	 * In graph mode call the graph tracer output function,
 	 * otherwise go with the TRACE_FN event handler
 	 */
-	if (is_graph())
+	if (is_graph(iter->tr))
 		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
 
 	return TRACE_TYPE_UNHANDLED;
@@ -287,7 +298,7 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 
 static void wakeup_print_header(struct seq_file *s)
 {
-	if (is_graph())
+	if (is_graph(wakeup_trace))
 		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
 	else
 		trace_default_header(s);
@@ -298,7 +309,7 @@ __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
 		 unsigned long flags, int pc)
 {
-	if (is_graph())
+	if (is_graph(tr))
 		trace_graph_function(tr, ip, parent_ip, flags, pc);
 	else
 		trace_function(tr, ip, parent_ip, flags, pc);
@@ -306,27 +317,20 @@ __trace_function(struct trace_array *tr,
 #else
 #define __trace_function trace_function
 
-static int
-wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
-{
-	return -EINVAL;
-}
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
-}
-
 static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
 
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-
 static void wakeup_trace_open(struct trace_iterator *iter) { }
 static void wakeup_trace_close(struct trace_iterator *iter) { }
 
 #ifdef CONFIG_FUNCTION_TRACER
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+	return -1;
+}
+static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
 static void wakeup_print_header(struct seq_file *s)
 {
 	trace_default_header(s);
@@ -342,16 +346,16 @@ static void wakeup_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(struct trace_array *tr, cycle_t delta)
+static bool report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
-			return 0;
+			return false;
 	} else {
 		if (delta <= tr->max_latency)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
 }
 
 static void
@@ -388,7 +392,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_cpu	= task_cpu(next);
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }
 
 static void
@@ -416,7 +420,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_cpu			= task_cpu(wakee);
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }
 
 static void notrace
@@ -635,7 +639,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	if (start_func_tracer(tr, is_graph()))
+	if (start_func_tracer(tr, is_graph(tr)))
 		printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
@@ -648,7 +652,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	stop_func_tracer(tr, is_graph());
+	stop_func_tracer(tr, is_graph(tr));
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -659,7 +663,7 @@ static bool wakeup_busy;
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_flags = trace_flags;
+	save_flags = tr->trace_flags;
 
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
@@ -740,8 +744,6 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_max	= true,
 	.print_header	= wakeup_print_header,
 	.print_line	= wakeup_print_line,
-	.flags		= &tracer_flags,
-	.set_flag	= wakeup_set_flag,
 	.flag_changed	= wakeup_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
@@ -762,8 +764,6 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_max	= true,
 	.print_header	= wakeup_print_header,
 	.print_line	= wakeup_print_line,
-	.flags		= &tracer_flags,
-	.set_flag	= wakeup_set_flag,
 	.flag_changed	= wakeup_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
@@ -784,8 +784,6 @@ static struct tracer wakeup_dl_tracer __read_mostly =
 	.print_max	= true,
 	.print_header	= wakeup_print_header,
 	.print_line	= wakeup_print_line,
-	.flags		= &tracer_flags,
-	.set_flag	= wakeup_set_flag,
 	.flag_changed	= wakeup_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
...
@@ -16,24 +16,22 @@
 
 #include "trace.h"
 
-#define STACK_TRACE_ENTRIES 500
-
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
 	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
-static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+unsigned stack_trace_index[STACK_TRACE_ENTRIES];
 
 /*
  * Reserve one entry for the passed in ip. This will allow
  * us to remove most or all of the stack size overhead
  * added by the stack tracer itself.
  */
-static struct stack_trace max_stack_trace = {
+struct stack_trace stack_trace_max = {
 	.max_entries		= STACK_TRACE_ENTRIES - 1,
 	.entries		= &stack_dump_trace[0],
 };
 
-static unsigned long max_stack_size;
-static arch_spinlock_t max_stack_lock =
+unsigned long stack_trace_max_size;
+arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static DEFINE_PER_CPU(int, trace_active);
@@ -42,30 +40,38 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void print_max_stack(void)
+void stack_trace_print(void)
 {
 	long i;
 	int size;
 
 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
 		 "        -----    ----   --------\n",
-		 max_stack_trace.nr_entries);
+		 stack_trace_max.nr_entries);
 
-	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+	for (i = 0; i < stack_trace_max.nr_entries; i++) {
 		if (stack_dump_trace[i] == ULONG_MAX)
 			break;
-		if (i+1 == max_stack_trace.nr_entries ||
+		if (i+1 == stack_trace_max.nr_entries ||
 		    stack_dump_trace[i+1] == ULONG_MAX)
-			size = stack_dump_index[i];
+			size = stack_trace_index[i];
 		else
-			size = stack_dump_index[i] - stack_dump_index[i+1];
+			size = stack_trace_index[i] - stack_trace_index[i+1];
 
-		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
+		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
 			 size, (void *)stack_dump_trace[i]);
 	}
 }
 
-static inline void
+/*
+ * When arch-specific code overrides this function, the following
+ * data should be filled up, assuming stack_trace_max_lock is held to
+ * prevent concurrent updates.
+ *     stack_trace_index[]
+ *     stack_trace_max
+ *     stack_trace_max_size
+ */
+void __weak
 check_stack(unsigned long ip, unsigned long *stack)
 {
 	unsigned long this_size, flags;
 	unsigned long *p, *top, *start;
@@ -78,7 +84,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 	/* Remove the frame of the tracer */
 	this_size -= frame_size;
 
-	if (this_size <= max_stack_size)
+	if (this_size <= stack_trace_max_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
@@ -90,7 +96,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 		return;
 
 	local_irq_save(flags);
-	arch_spin_lock(&max_stack_lock);
+	arch_spin_lock(&stack_trace_max_lock);
 
 	/*
 	 * RCU may not be watching, make it see us.
@@ -103,18 +109,18 @@ check_stack(unsigned long ip, unsigned long *stack)
 		this_size -= tracer_frame;
 
 	/* a race could have already updated it */
-	if (this_size <= max_stack_size)
+	if (this_size <= stack_trace_max_size)
 		goto out;
 
-	max_stack_size = this_size;
+	stack_trace_max_size = this_size;
 
-	max_stack_trace.nr_entries = 0;
-	max_stack_trace.skip = 3;
+	stack_trace_max.nr_entries = 0;
+	stack_trace_max.skip = 3;
 
-	save_stack_trace(&max_stack_trace);
+	save_stack_trace(&stack_trace_max);
 
 	/* Skip over the overhead of the stack tracer itself */
-	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+	for (i = 0; i < stack_trace_max.nr_entries; i++) {
 		if (stack_dump_trace[i] == ip)
 			break;
 	}
@@ -134,18 +140,18 @@ check_stack(unsigned long ip, unsigned long *stack)
 	 * loop will only happen once. This code only takes place
 	 * on a new max, so it is far from a fast path.
 	 */
-	while (i < max_stack_trace.nr_entries) {
+	while (i < stack_trace_max.nr_entries) {
 		int found = 0;
 
-		stack_dump_index[x] = this_size;
+		stack_trace_index[x] = this_size;
 		p = start;
 
-		for (; p < top && i < max_stack_trace.nr_entries; p++) {
+		for (; p < top && i < stack_trace_max.nr_entries; p++) {
 			if (stack_dump_trace[i] == ULONG_MAX)
 				break;
 			if (*p == stack_dump_trace[i]) {
 				stack_dump_trace[x] = stack_dump_trace[i++];
-				this_size = stack_dump_index[x++] =
+				this_size = stack_trace_index[x++] =
 					(top - p) * sizeof(unsigned long);
 				found = 1;
 				/* Start the search from here */
@@ -160,7 +166,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 				if (unlikely(!tracer_frame)) {
 					tracer_frame = (p - stack) *
 						sizeof(unsigned long);
-					max_stack_size -= tracer_frame;
+					stack_trace_max_size -= tracer_frame;
 				}
 			}
 		}
@@ -169,18 +175,18 @@ check_stack(unsigned long ip, unsigned long *stack)
 			i++;
 	}
 
-	max_stack_trace.nr_entries = x;
+	stack_trace_max.nr_entries = x;
 	for (; x < i; x++)
 		stack_dump_trace[x] = ULONG_MAX;
 
 	if (task_stack_end_corrupted(current)) {
-		print_max_stack();
+		stack_trace_print();
 		BUG();
 	}
 
 out:
 	rcu_irq_exit();
-	arch_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&stack_trace_max_lock);
 	local_irq_restore(flags);
 }
@@ -251,9 +257,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	cpu = smp_processor_id();
 	per_cpu(trace_active, cpu)++;
 
-	arch_spin_lock(&max_stack_lock);
+	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
-	arch_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&stack_trace_max_lock);
 
 	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
@@ -273,7 +279,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
 	long n = *pos - 1;
 
-	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
 		return NULL;
 
 	m->private = (void *)n;
@@ -296,7 +302,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	cpu = smp_processor_id();
 	per_cpu(trace_active, cpu)++;
 
-	arch_spin_lock(&max_stack_lock);
+	arch_spin_lock(&stack_trace_max_lock);
 
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
@@ -308,7 +314,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	int cpu;
 
-	arch_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&stack_trace_max_lock);
 
 	cpu = smp_processor_id();
 	per_cpu(trace_active, cpu)--;
@@ -343,9 +349,9 @@ static int t_show(struct seq_file *m, void *v)
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   max_stack_trace.nr_entries);
+			   stack_trace_max.nr_entries);
 
-		if (!stack_tracer_enabled && !max_stack_size)
+		if (!stack_tracer_enabled && !stack_trace_max_size)
 			print_disabled(m);
 
 		return 0;
@@ -353,17 +359,17 @@ static int t_show(struct seq_file *m, void *v)
 	i = *(long *)v;
 
-	if (i >= max_stack_trace.nr_entries ||
+	if (i >= stack_trace_max.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
 		return 0;
 
-	if (i+1 == max_stack_trace.nr_entries ||
+	if (i+1 == stack_trace_max.nr_entries ||
 	    stack_dump_trace[i+1] == ULONG_MAX)
-		size = stack_dump_index[i];
+		size = stack_trace_index[i];
 	else
-		size = stack_dump_index[i] - stack_dump_index[i+1];
+		size = stack_trace_index[i] - stack_trace_index[i+1];
 
-	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
+	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);
 
 	trace_lookup_stack(m, i);
@@ -453,7 +459,7 @@ static __init int stack_trace_init(void)
 		return 0;
 
 	trace_create_file("stack_max_size", 0644, d_tracer,
-			&max_stack_size, &stack_max_size_fops);
+			&stack_trace_max_size, &stack_max_size_fops);
 
 	trace_create_file("stack_trace", 0444, d_tracer,
 			NULL, &stack_trace_fops);
...
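check_stack() above turns into a weak symbol so that an architecture can ship its own stack walker; per the new comment, an override must fill stack_trace_index[], stack_trace_max and stack_trace_max_size while holding stack_trace_max_lock. The weak/strong linkage mechanism itself can be shown with a standalone program (illustration only, not kernel code):

    #include <stdio.h>

    /* default definition; any non-weak check_stack() linked in wins */
    void __attribute__((weak)) check_stack(unsigned long ip, unsigned long *stack)
    {
            (void)ip;
            (void)stack;
            printf("generic check_stack\n");
    }

    int main(void)
    {
            unsigned long dummy = 0;

            check_stack(0, &dummy);  /* prints the override if one is linked */
            return 0;
    }

Linking a second object file that defines a plain (strong) check_stack() silently replaces the weak default, which is exactly how an arch-specific tracer slots in.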
@@ -110,6 +110,7 @@ static enum print_line_t
 print_syscall_enter(struct trace_iterator *iter, int flags,
 		    struct trace_event *event)
 {
+	struct trace_array *tr = iter->tr;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *ent = iter->ent;
 	struct syscall_trace_enter *trace;
@@ -136,7 +137,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
 			goto end;
 
 		/* parameter types */
-		if (trace_flags & TRACE_ITER_VERBOSE)
+		if (tr->trace_flags & TRACE_ITER_VERBOSE)
 			trace_seq_printf(s, "%s ", entry->types[i]);
 
 		/* parameter values */
...
@@ -91,11 +91,13 @@ static void debug_print_probes(struct tracepoint_func *funcs)
 		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
 }
 
-static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
-		struct tracepoint_func *tp_func)
+static struct tracepoint_func *
+func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
+	 int prio)
 {
-	int nr_probes = 0;
 	struct tracepoint_func *old, *new;
+	int nr_probes = 0;
+	int pos = -1;
 
 	if (WARN_ON(!tp_func->func))
 		return ERR_PTR(-EINVAL);
@@ -104,18 +106,33 @@ static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
 	old = *funcs;
 	if (old) {
 		/* (N -> N+1), (N != 0, 1) probes */
-		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
+		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+			/* Insert before probes of lower priority */
+			if (pos < 0 && old[nr_probes].prio < prio)
+				pos = nr_probes;
 			if (old[nr_probes].func == tp_func->func &&
 			    old[nr_probes].data == tp_func->data)
 				return ERR_PTR(-EEXIST);
+		}
 	}
 	/* + 2 : one for new probe, one for NULL func */
 	new = allocate_probes(nr_probes + 2);
 	if (new == NULL)
 		return ERR_PTR(-ENOMEM);
-	if (old)
-		memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
-	new[nr_probes] = *tp_func;
+	if (old) {
+		if (pos < 0) {
+			pos = nr_probes;
+			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
+		} else {
+			/* Copy higher priority probes ahead of the new probe */
+			memcpy(new, old, pos * sizeof(struct tracepoint_func));
+			/* Copy the rest after it. */
+			memcpy(new + pos + 1, old + pos,
+			       (nr_probes - pos) * sizeof(struct tracepoint_func));
+		}
+	} else
+		pos = 0;
+	new[pos] = *tp_func;
 	new[nr_probes + 1].func = NULL;
 	*funcs = new;
 	debug_print_probes(*funcs);
@@ -174,7 +191,7 @@ static void *func_remove(struct tracepoint_func **funcs,
  * Add the probe function to a tracepoint.
  */
 static int tracepoint_add_func(struct tracepoint *tp,
-			       struct tracepoint_func *func)
+			       struct tracepoint_func *func, int prio)
 {
 	struct tracepoint_func *old, *tp_funcs;
@@ -183,7 +200,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
 	tp_funcs = rcu_dereference_protected(tp->funcs,
 			lockdep_is_held(&tracepoints_mutex));
-	old = func_add(&tp_funcs, func);
+	old = func_add(&tp_funcs, func, prio);
 	if (IS_ERR(old)) {
 		WARN_ON_ONCE(1);
 		return PTR_ERR(old);
@@ -240,6 +257,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
  * @tp: tracepoint
  * @probe: probe handler
  * @data: tracepoint data
+ * @prio: priority of this function over other registered functions
  *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
@@ -247,7 +265,8 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
-int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
+int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
+				   void *data, int prio)
 {
 	struct tracepoint_func tp_func;
 	int ret;
@@ -255,10 +274,30 @@ int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
 	mutex_lock(&tracepoints_mutex);
 	tp_func.func = probe;
 	tp_func.data = data;
-	ret = tracepoint_add_func(tp, &tp_func);
+	tp_func.prio = prio;
+	ret = tracepoint_add_func(tp, &tp_func, prio);
 	mutex_unlock(&tracepoints_mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
+
+/**
+ * tracepoint_probe_register -  Connect a probe to a tracepoint
+ * @tp: tracepoint
+ * @probe: probe handler
+ * @data: tracepoint data
+ * @prio: priority of this function over other registered functions
+ *
+ * Returns 0 if ok, error value on error.
+ * Note: if @tp is within a module, the caller is responsible for
+ * unregistering the probe before the module is gone. This can be
+ * performed either with a tracepoint module going notifier, or from
+ * within module exit functions.
+ */
+int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
+{
+	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
+}
 EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 
 /**
...
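func_add() now keeps the probe array ordered so that higher-priority probes run first: the new entry is inserted before the first existing probe whose prio is lower, or appended if there is none. Callers pick a priority with tracepoint_probe_register_prio(); the old tracepoint_probe_register() becomes a wrapper passing TRACEPOINT_DEFAULT_PRIO. A standalone sketch of just the position computation (stand-in types, illustration only):

    #include <stdio.h>

    struct tracepoint_func { void *func; void *data; int prio; };

    /* mirrors the "insert before probes of lower priority" rule above */
    static int insert_pos(const struct tracepoint_func *old, int nr_probes, int prio)
    {
            int i;

            for (i = 0; i < nr_probes; i++)
                    if (old[i].prio < prio)
                            return i;
            return nr_probes;        /* nothing lower: append at the tail */
    }

    int main(void)
    {
            struct tracepoint_func probes[] = {
                    { (void *)1, 0, 30 }, { (void *)2, 0, 10 }, { (void *)3, 0, 10 },
            };

            /* a prio-20 probe lands at index 1: after the 30, before the 10s */
            printf("insert at %d\n", insert_pos(probes, 3, 20));
            return 0;
    }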
@@ -4,14 +4,14 @@
  *
  * The define_trace.h below will also look for a file name of
  * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
- * In this case, it would look for sample.h
+ * In this case, it would look for sample-trace.h
  *
  * If the header name will be different than the system name
  * (as in this case), then you can override the header name that
  * define_trace.h will look up by defining TRACE_INCLUDE_FILE
  *
  * This file is called trace-events-sample.h but we want the system
- * to be called "sample". Therefore we must define the name of this
+ * to be called "sample-trace". Therefore we must define the name of this
  * file:
  *
  * #define TRACE_INCLUDE_FILE trace-events-sample
@@ -106,7 +106,7 @@
  *
  *         memcpy(__entry->foo, bar, 10);
  *
- * __dynamic_array: This is similar to array, but can vary is size from
+ * __dynamic_array: This is similar to array, but can vary its size from
  *          instance to instance of the tracepoint being called.
  *          Like __array, this too has three elements (type, name, size);
  *          type is the type of the element, name is the name of the array.
...
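Since the hunk above touches the __dynamic_array documentation, a compact sketch of an event using it may help. The event name and fields are invented for illustration; the macros themselves (__dynamic_array, __get_dynamic_array, TP_PROTO and friends) are the real TRACE_EVENT infrastructure:

    TRACE_EVENT(sample_dyn,

            TP_PROTO(const int *values, int count),

            TP_ARGS(values, count),

            TP_STRUCT__entry(
                    /* type, name, number of elements for this instance */
                    __dynamic_array(int, vals, count)
            ),

            TP_fast_assign(
                    memcpy(__get_dynamic_array(vals), values,
                           count * sizeof(int));
            ),

            TP_printk("first=%d", ((int *)__get_dynamic_array(vals))[0])
    );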
@@ -42,6 +42,7 @@
 
 #ifndef EM_AARCH64
 #define EM_AARCH64	183
+#define R_AARCH64_NONE		0
 #define R_AARCH64_ABS64	257
 #endif
@@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_t const offset)
 	return 0;
 }
 
+static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
+static int make_nop_arm64(void *map, size_t const offset)
+{
+	uint32_t *ptr;
+
+	ptr = map + offset;
+	/* bl <_mcount> is 0x94000000 before relocation */
+	if (*ptr != 0x94000000)
+		return -1;
+
+	/* Convert to nop */
+	ulseek(fd_map, offset, SEEK_SET);
+	uwrite(fd_map, ideal_nop, 4);
+	return 0;
+}
+
 /*
  * Get the whole file as a programming convenience in order to avoid
  * malloc+lseek+read+free of many pieces.  If successful, then mmap
@@ -345,6 +362,7 @@ do_file(char const *const fname)
 		break;
 	case EM_386:
 		reltype = R_386_32;
+		rel_type_nop = R_386_NONE;
 		make_nop = make_nop_x86;
 		ideal_nop = ideal_nop5_x86_32;
 		mcount_adjust_32 = -1;
@@ -353,7 +371,12 @@ do_file(char const *const fname)
 		altmcount = "__gnu_mcount_nc";
 		break;
 	case EM_AARCH64:
-		reltype = R_AARCH64_ABS64; gpfx = '_'; break;
+		reltype = R_AARCH64_ABS64;
+		make_nop = make_nop_arm64;
+		rel_type_nop = R_AARCH64_NONE;
+		ideal_nop = ideal_nop4_arm64;
+		gpfx = '_';
+		break;
 	case EM_IA_64:	reltype = R_IA64_IMM64;   gpfx = '_'; break;
 	case EM_METAG:	reltype = R_METAG_ADDR32;
 			altmcount = "_mcount_wrapper";
@@ -371,6 +394,7 @@ do_file(char const *const fname)
 		make_nop = make_nop_x86;
 		ideal_nop = ideal_nop5_x86_64;
 		reltype = R_X86_64_64;
+		rel_type_nop = R_X86_64_NONE;
 		mcount_adjust_64 = -1;
 		break;
 	}  /* end switch */
...
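A quick standalone check (not part of the patch) that ideal_nop4_arm64 above is the little-endian byte sequence of the AArch64 NOP instruction, 0xd503201f, which is what make_nop_arm64() writes over the unrelocated bl (0x94000000):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
            uint32_t insn;

            memcpy(&insn, ideal_nop4_arm64, 4);  /* assumes a little-endian host */
            printf("nop encodes as 0x%08x (expect 0xd503201f)\n", insn);
            return insn == 0xd503201fu ? 0 : 1;
    }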
@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr,
 		if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
 			if (make_nop)
-				ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
+				ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
 			if (warn_on_notrace_sect && !once) {
 				printf("Section %s has mcount callers being ignored\n",
 				       txtname);
...
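The one-liner above wraps sh_offset and r_offset in _w() before the addition: on a cross-endian build those on-disk ELF fields are in the target's byte order, and summing two unconverted words yields a bogus file offset. A standalone illustration of why the conversion must happen per field (w4rev mimics recordmcount's byte-swapping accessor; the offsets are invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t w4rev(uint32_t x)  /* byte-swap a 32-bit word */
    {
            return ((x >> 24) & 0x000000ff) | ((x >>  8) & 0x0000ff00) |
                   ((x <<  8) & 0x00ff0000) | ((x << 24) & 0xff000000);
    }

    int main(void)
    {
            /* raw big-endian file fields as misread on a little-endian host */
            uint32_t sh_offset = w4rev(0x1000);
            uint32_t r_offset  = w4rev(0x40);

            printf("raw sum:       %#x (wrong)\n", sh_offset + r_offset);
            printf("converted sum: %#x (right)\n",
                   w4rev(sh_offset) + w4rev(r_offset));
            return 0;
    }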