Commit 4c174688 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "New features for this release:

   - Pretty much a full rewrite of the processing of function plugins,
     e.g. echo do_IRQ:stacktrace > set_ftrace_filter

   - The rewrite was needed so that plugins can be unique to tracing
     instances, e.g. mkdir instances/foo; cd instances/foo; echo
     do_IRQ:stacktrace > set_ftrace_filter. The old way was written in a
     very hacky fashion; this rewrite removes a lot of those hacks.

   - New "function-fork" tracing option. When set, pids in the
     set_ftrace_pid will have their children added when the processes
     with their pids listed in the set_ftrace_pid file forks.

   - Exposure of "maxactive" for kretprobe in kprobe_events

   - Allow for builtin init functions to be traced by the function
     tracer (via the kernel command line). Module init function tracing
     will come in the next release.

   - Added more selftests, and have selftests also test in an instance"
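
A rough usage sketch of the new user-visible knobs, assuming the standard
tracefs layout under /sys/kernel/debug/tracing (paths and shorthands below
are illustrative, not quoted from the pull request):

    # per-instance function plugin (stacktrace probe)
    cd /sys/kernel/debug/tracing
    mkdir instances/foo
    echo do_IRQ:stacktrace > instances/foo/set_ftrace_filter

    # "function-fork": children of traced pids get added on fork
    echo function-fork > trace_options    # or: echo 1 > options/function-fork
    echo $$ > set_ftrace_pid
    echo function > current_tracer

    # builtin __init functions can now be recorded when the function
    # tracer is enabled from the kernel command line, e.g. booting with:
    #     ftrace=function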

* tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (60 commits)
  ring-buffer: Return reader page back into existing ring buffer
  selftests: ftrace: Allow some event trigger tests to run in an instance
  selftests: ftrace: Have some basic tests run in a tracing instance too
  selftests: ftrace: Have event tests also run in an tracing instance
  selftests: ftrace: Make func_event_triggers and func_traceonoff_triggers tests do instances
  selftests: ftrace: Allow some tests to be run in a tracing instance
  tracing/ftrace: Allow for instances to trigger their own stacktrace probes
  tracing/ftrace: Allow for the traceonoff probe be unique to instances
  tracing/ftrace: Enable snapshot function trigger to work with instances
  tracing/ftrace: Allow instances to have their own function probes
  tracing/ftrace: Add a better way to pass data via the probe functions
  ftrace: Dynamically create the probe ftrace_ops for the trace_array
  tracing: Pass the trace_array into ftrace_probe_ops functions
  tracing: Have the trace_array hold the list of registered func probes
  ftrace: If the hash for a probe fails to update then free what was initialized
  ftrace: Have the function probes call their own function
  ftrace: Have each function probe use its own ftrace_ops
  ftrace: Have unregister_ftrace_function_probe_func() return a value
  ftrace: Add helper function ftrace_hash_move_and_update_ops()
  ftrace: Remove data field from ftrace_func_probe structure
  ...
parents 9c35baf6 73a757e6
@@ -24,7 +24,7 @@ current_tracer. Instead of that, add probe points via
 Synopsis of kprobe_events
 -------------------------
   p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS]	: Set a probe
-  r[:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS]		: Set a return probe
+  r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS]	: Set a return probe
   -:[GRP/]EVENT						: Clear a probe

 GRP		: Group name. If omitted, use "kprobes" for it.
@@ -33,6 +33,9 @@ Synopsis of kprobe_events
  MOD	: Module name which has given SYM.
  SYM[+offs]	: Symbol+offset where the probe is inserted.
  MEMADDR	: Address where the probe is inserted.
+ MAXACTIVE	: Maximum number of instances of the specified function that
+		  can be probed simultaneously, or 0 for the default value
+		  as defined in Documentation/kprobes.txt section 1.3.1.

  FETCHARGS	: Arguments. Each probe can have up to 128 args.
   %REG		: Fetch register REG
......
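As a hedged example of the new MAXACTIVE field documented above (the event
name "myretprobe" and the probed symbol are arbitrary, chosen only for
illustration):

    # allow up to 20 concurrent return-probe instances on do_sys_open
    echo 'r20:myretprobe do_sys_open $retval' > /sys/kernel/debug/tracing/kprobe_events
    echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable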
@@ -533,7 +533,13 @@ static void do_sync_core(void *data)

 static void run_sync(void)
 {
-	int enable_irqs = irqs_disabled();
+	int enable_irqs;
+
+	/* No need to sync if there's only one CPU */
+	if (num_online_cpus() == 1)
+		return;
+
+	enable_irqs = irqs_disabled();

 	/* We may be called with interrupts disabled (on bootup). */
 	if (enable_irqs)
......
@@ -42,8 +42,10 @@
 /* Main tracing buffer and events set up */
 #ifdef CONFIG_TRACING
 void trace_init(void);
+void early_trace_init(void);
 #else
 static inline void trace_init(void) { }
+static inline void early_trace_init(void) { }
 #endif

 struct module;
@@ -144,6 +146,10 @@ struct ftrace_ops_hash {
 	struct ftrace_hash		*filter_hash;
 	struct mutex			regex_lock;
 };
+
+void ftrace_free_init_mem(void);
+#else
+static inline void ftrace_free_init_mem(void) { }
 #endif

 /*
@@ -260,6 +266,7 @@ static inline int ftrace_nr_registered_ops(void)
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
+static inline void ftrace_free_init_mem(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_STACK_TRACER
@@ -279,15 +286,45 @@ int
 stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos);
-#endif

-struct ftrace_func_command {
-	struct list_head	list;
-	char			*name;
-	int			(*func)(struct ftrace_hash *hash,
-					char *func, char *cmd,
-					char *params, int enable);
-};
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There's a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+	/* Preemption or interupts must be disabled */
+	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_dec(disable_stack_tracer);
+}
+#else
+static inline void stack_tracer_disable(void) { }
+static inline void stack_tracer_enable(void) { }
 #endif

 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -315,30 +352,6 @@ void ftrace_bug(int err, struct dyn_ftrace *rec);

 struct seq_file;

-struct ftrace_probe_ops {
-	void			(*func)(unsigned long ip,
-					unsigned long parent_ip,
-					void **data);
-	int			(*init)(struct ftrace_probe_ops *ops,
-					unsigned long ip, void **data);
-	void			(*free)(struct ftrace_probe_ops *ops,
-					unsigned long ip, void **data);
-	int			(*print)(struct seq_file *m,
-					 unsigned long ip,
-					 struct ftrace_probe_ops *ops,
-					 void *data);
-};
-
-extern int
-register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-			       void *data);
-extern void
-unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-				 void *data);
-extern void
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
-extern void unregister_ftrace_function_probe_all(char *glob);
-
 extern int ftrace_text_reserved(const void *start, const void *end);

 extern int ftrace_nr_registered_ops(void);
@@ -400,9 +413,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 void ftrace_free_filter(struct ftrace_ops *ops);
 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

-int register_ftrace_command(struct ftrace_func_command *cmd);
-int unregister_ftrace_command(struct ftrace_func_command *cmd);
-
 enum {
 	FTRACE_UPDATE_CALLS		= (1 << 0),
 	FTRACE_DISABLE_CALLS		= (1 << 1),
@@ -433,8 +443,8 @@ enum {
 	FTRACE_ITER_FILTER	= (1 << 0),
 	FTRACE_ITER_NOTRACE	= (1 << 1),
 	FTRACE_ITER_PRINTALL	= (1 << 2),
-	FTRACE_ITER_DO_HASH	= (1 << 3),
-	FTRACE_ITER_HASH	= (1 << 4),
+	FTRACE_ITER_DO_PROBES	= (1 << 3),
+	FTRACE_ITER_PROBE	= (1 << 4),
 	FTRACE_ITER_ENABLED	= (1 << 5),
 };
@@ -618,14 +628,6 @@ static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_module_init(struct module *mod) { }
 static inline void ftrace_module_enable(struct module *mod) { }
 static inline void ftrace_release_mod(struct module *mod) { }
-static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
-{
-	return -EINVAL;
-}
-static inline __init int unregister_ftrace_command(char *cmd_name)
-{
-	return -EINVAL;
-}
 static inline int ftrace_text_reserved(const void *start, const void *end)
 {
 	return 0;
......
@@ -39,7 +39,7 @@
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace __latent_entropy
+#define __init		__section(.init.text) __cold __inittrace __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -68,8 +68,10 @@
 #ifdef MODULE
 #define __exitused
+#define __inittrace notrace
 #else
 #define __exitused  __used
+#define __inittrace
 #endif

 #define __exit          __section(.exit.text) __exitused __cold notrace
......
@@ -97,6 +97,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 			       unsigned long secs,
 			       unsigned long c_old,
 			       unsigned long c);
+bool rcu_irq_enter_disabled(void);
 #else
 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
 					  int *flags,
@@ -113,6 +114,10 @@ static inline void rcutorture_record_test_transition(void)
 static inline void rcutorture_record_progress(unsigned long vernum)
 {
 }
+static inline bool rcu_irq_enter_disabled(void)
+{
+	return false;
+}
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
 			       struct rcu_head *rhp,
......
@@ -185,7 +185,7 @@ size_t ring_buffer_page_len(void *page);

 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
 			  size_t len, int cpu, int full);
......
@@ -138,16 +138,7 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };

-/*
- * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
- * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
- * simplifies those functions and keeps them in sync.
- */
-static inline enum print_line_t trace_handle_return(struct trace_seq *s)
-{
-	return trace_seq_has_overflowed(s) ?
-		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
-}
+enum print_line_t trace_handle_return(struct trace_seq *s);

 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
......
@@ -128,7 +128,7 @@ extern void syscall_unregfunc(void);
  * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
  * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
-#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)		\
+#define __DO_TRACE(tp, proto, args, cond, rcucheck)			\
 	do {								\
 		struct tracepoint_func *it_func_ptr;			\
 		void *it_func;						\
@@ -136,7 +136,11 @@ extern void syscall_unregfunc(void);
 									\
 		if (!(cond))						\
 			return;						\
-		prercu;							\
+		if (rcucheck) {						\
+			if (WARN_ON_ONCE(rcu_irq_enter_disabled()))	\
+				return;					\
+			rcu_irq_enter_irqson();				\
+		}							\
 		rcu_read_lock_sched_notrace();				\
 		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
 		if (it_func_ptr) {					\
@@ -147,7 +151,8 @@ extern void syscall_unregfunc(void);
 			} while ((++it_func_ptr)->func);		\
 		}							\
 		rcu_read_unlock_sched_notrace();			\
-		postrcu;						\
+		if (rcucheck)						\
+			rcu_irq_exit_irqson();				\
 	} while (0)

 #ifndef MODULE
@@ -158,9 +163,7 @@ extern void syscall_unregfunc(void);
 		__DO_TRACE(&__tracepoint_##name,			\
 			TP_PROTO(data_proto),				\
 			TP_ARGS(data_args),				\
-			TP_CONDITION(cond),				\
-			rcu_irq_enter_irqson(),				\
-			rcu_irq_exit_irqson());				\
+			TP_CONDITION(cond), 1);				\
 	}
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
@@ -186,7 +189,7 @@ extern void syscall_unregfunc(void);
 		__DO_TRACE(&__tracepoint_##name,			\
 			TP_PROTO(data_proto),				\
 			TP_ARGS(data_args),				\
-			TP_CONDITION(cond),,);				\
+			TP_CONDITION(cond), 0);				\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
 			rcu_read_lock_sched_notrace();			\
 			rcu_dereference_sched(__tracepoint_##name.funcs);\
......
@@ -545,6 +545,11 @@ asmlinkage __visible void __init start_kernel(void)
 	trap_init();
 	mm_init();

+	ftrace_init();
+
+	/* trace_printk can be enabled here */
+	early_trace_init();
+
 	/*
 	 * Set up the scheduler prior starting any interrupts (such as the
 	 * timer interrupt). Full topology setup happens at smp_init()
@@ -570,7 +575,7 @@ asmlinkage __visible void __init start_kernel(void)
 	rcu_init();

-	/* trace_printk() and trace points may be used after this */
+	/* Trace events are available after this */
 	trace_init();

 	context_tracking_init();
@@ -670,8 +675,6 @@ asmlinkage __visible void __init start_kernel(void)
 		efi_free_boot_services();
 	}

-	ftrace_init();
-
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
@@ -959,6 +962,7 @@ static int __ref kernel_init(void *unused)
 	kernel_init_freeable();
 	/* need to finish all async __init code before freeing the memory */
 	async_synchronize_full();
+	ftrace_free_init_mem();
 	free_initmem();
 	mark_readonly();
 	system_state = SYSTEM_RUNNING;
......
@@ -57,6 +57,7 @@
 #include <linux/random.h>
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
+#include <linux/ftrace.h>

 #include "tree.h"
 #include "rcu.h"
@@ -283,6 +284,20 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };

+/*
+ * There's a few places, currently just in the tracing infrastructure,
+ * that uses rcu_irq_enter() to make sure RCU is watching. But there's
+ * a small location where that will not even work. In those cases
+ * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
+ * can be called.
+ */
+static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
+
+bool rcu_irq_enter_disabled(void)
+{
+	return this_cpu_read(disable_rcu_irq_enter);
+}
+
 /*
  * Record entry into an extended quiescent state. This is only to be
  * called when not already in an extended quiescent state.
@@ -771,25 +786,24 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 }

 /*
- * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
+ * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
  *
- * If the new value of the ->dynticks_nesting counter now is zero,
- * we really have entered idle, and must do the appropriate accounting.
- * The caller must have disabled interrupts.
+ * Enter idle, doing appropriate accounting. The caller must have
+ * disabled interrupts.
  */
-static void rcu_eqs_enter_common(long long oldval, bool user)
+static void rcu_eqs_enter_common(bool user)
 {
 	struct rcu_state *rsp;
 	struct rcu_data *rdp;
-	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

-	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
 			idle_task(smp_processor_id());

-		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
+		trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
 		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -800,7 +814,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
 		do_nocb_deferred_wakeup(rdp);
 	}
 	rcu_prepare_for_idle();
-	rcu_dynticks_eqs_enter();
+	__this_cpu_inc(disable_rcu_irq_enter);
+	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
+	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
+	__this_cpu_dec(disable_rcu_irq_enter);
 	rcu_dynticks_task_enter();

 	/*
@@ -821,19 +838,15 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
  */
 static void rcu_eqs_enter(bool user)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;

 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
-		rdtp->dynticks_nesting = 0;
-		rcu_eqs_enter_common(oldval, user);
-	} else {
+		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rcu_eqs_enter_common(user);
+	else
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	}
 }

 /**
@@ -892,19 +905,18 @@ void rcu_user_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;

 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 0);
-	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
-	else
-		rcu_eqs_enter_common(oldval, true);
+		     rdtp->dynticks_nesting < 1);
+	if (rdtp->dynticks_nesting <= 1) {
+		rcu_eqs_enter_common(true);
+	} else {
+		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
+		rdtp->dynticks_nesting--;
+	}
 	rcu_sysidle_enter(1);
 }
......
@@ -135,6 +135,7 @@ config FUNCTION_TRACER
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
+	select TASKS_RCU if PREEMPT
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
......
@@ -36,6 +36,7 @@
 #include <trace/events/sched.h>

+#include <asm/sections.h>
 #include <asm/setup.h>

 #include "trace_output.h"
@@ -1095,22 +1096,20 @@ static bool update_all_ops;
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif

-static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
-
-struct ftrace_func_probe {
-	struct hlist_node	node;
-	struct ftrace_probe_ops	*ops;
-	unsigned long		flags;
-	unsigned long		ip;
-	void			*data;
-	struct list_head	free_list;
-};
-
 struct ftrace_func_entry {
 	struct hlist_node hlist;
 	unsigned long ip;
 };

+struct ftrace_func_probe {
+	struct ftrace_probe_ops	*probe_ops;
+	struct ftrace_ops	ops;
+	struct trace_array	*tr;
+	struct list_head	list;
+	void			*data;
+	int			ref;
+};
+
 /*
  * We make these constant because no one should touch them,
  * but they are used as the default "empty hash", to avoid allocating
@@ -1271,7 +1270,7 @@ static void
 remove_hash_entry(struct ftrace_hash *hash,
 		  struct ftrace_func_entry *entry)
 {
-	hlist_del(&entry->hlist);
+	hlist_del_rcu(&entry->hlist);
 	hash->count--;
 }
@@ -2807,8 +2806,9 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * callers are done before leaving this function.
 	 * The same goes for freeing the per_cpu data of the per_cpu
 	 * ops.
-	 *
-	 * Again, normal synchronize_sched() is not good enough.
+	 */
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+		/*
 	 * We need to do a hard force of sched synchronization.
 	 * This is because we use preempt_disable() to do RCU, but
 	 * the function tracers can be called where RCU is not watching
@@ -2816,9 +2816,18 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * infrastructure to do the synchronization, thus we must do it
 	 * ourselves.
 	 */
-	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
 		schedule_on_each_cpu(ftrace_sync);
+		/*
+		 * When the kernel is preeptive, tasks can be preempted
+		 * while on a ftrace trampoline. Just scheduling a task on
+		 * a CPU is not good enough to flush them. Calling
+		 * synchornize_rcu_tasks() will wait for those tasks to
+		 * execute and either schedule voluntarily or enter user space.
+		 */
+		if (IS_ENABLED(CONFIG_PREEMPT))
+			synchronize_rcu_tasks();
+
 		arch_ftrace_trampoline_free(ops);

 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
@@ -3055,34 +3064,63 @@ struct ftrace_iterator {
 	struct ftrace_page		*pg;
 	struct dyn_ftrace		*func;
 	struct ftrace_func_probe	*probe;
+	struct ftrace_func_entry	*probe_entry;
 	struct trace_parser		parser;
 	struct ftrace_hash		*hash;
 	struct ftrace_ops		*ops;
-	int				hidx;
+	int				pidx;
 	int				idx;
 	unsigned			flags;
 };

 static void *
-t_hash_next(struct seq_file *m, loff_t *pos)
+t_probe_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
+	struct trace_array *tr = iter->ops->private;
+	struct list_head *func_probes;
+	struct ftrace_hash *hash;
+	struct list_head *next;
 	struct hlist_node *hnd = NULL;
 	struct hlist_head *hhd;
+	int size;

 	(*pos)++;
 	iter->pos = *pos;

-	if (iter->probe)
-		hnd = &iter->probe->node;
+	if (!tr)
+		return NULL;
+
+	func_probes = &tr->func_probes;
+	if (list_empty(func_probes))
+		return NULL;
+
+	if (!iter->probe) {
+		next = func_probes->next;
+		iter->probe = list_entry(next, struct ftrace_func_probe, list);
+	}
+
+	if (iter->probe_entry)
+		hnd = &iter->probe_entry->hlist;
+
+	hash = iter->probe->ops.func_hash->filter_hash;
+	size = 1 << hash->size_bits;
+
 retry:
-	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+	if (iter->pidx >= size) {
+		if (iter->probe->list.next == func_probes)
 			return NULL;
+		next = iter->probe->list.next;
+		iter->probe = list_entry(next, struct ftrace_func_probe, list);
+		hash = iter->probe->ops.func_hash->filter_hash;
+		size = 1 << hash->size_bits;
+		iter->pidx = 0;
+	}

-	hhd = &ftrace_func_hash[iter->hidx];
+	hhd = &hash->buckets[iter->pidx];

 	if (hlist_empty(hhd)) {
-		iter->hidx++;
+		iter->pidx++;
 		hnd = NULL;
 		goto retry;
 	}
@@ -3092,7 +3130,7 @@ t_hash_next(struct seq_file *m, loff_t *pos)
 	else {
 		hnd = hnd->next;
 		if (!hnd) {
-			iter->hidx++;
+			iter->pidx++;
 			goto retry;
 		}
 	}
@@ -3100,26 +3138,28 @@ t_hash_next(struct seq_file *m, loff_t *pos)
 	if (WARN_ON_ONCE(!hnd))
 		return NULL;

-	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);

 	return iter;
 }

-static void *t_hash_start(struct seq_file *m, loff_t *pos)
+static void *t_probe_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
 	loff_t l;

-	if (!(iter->flags & FTRACE_ITER_DO_HASH))
+	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
 		return NULL;

 	if (iter->func_pos > *pos)
 		return NULL;

-	iter->hidx = 0;
+	iter->probe = NULL;
+	iter->probe_entry = NULL;
+	iter->pidx = 0;
 	for (l = 0; l <= (*pos - iter->func_pos); ) {
-		p = t_hash_next(m, &l);
+		p = t_probe_next(m, &l);
 		if (!p)
 			break;
 	}
@@ -3127,50 +3167,42 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 		return NULL;

 	/* Only set this if we have an item */
-	iter->flags |= FTRACE_ITER_HASH;
+	iter->flags |= FTRACE_ITER_PROBE;

 	return iter;
 }
 static int
-t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
+t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
 {
-	struct ftrace_func_probe *rec;
+	struct ftrace_func_entry *probe_entry;
+	struct ftrace_probe_ops *probe_ops;
+	struct ftrace_func_probe *probe;
+
+	probe = iter->probe;
+	probe_entry = iter->probe_entry;

-	rec = iter->probe;
-	if (WARN_ON_ONCE(!rec))
+	if (WARN_ON_ONCE(!probe || !probe_entry))
 		return -EIO;

-	if (rec->ops->print)
-		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+	probe_ops = probe->probe_ops;

-	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
+	if (probe_ops->print)
+		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);

-	if (rec->data)
-		seq_printf(m, ":%p", rec->data);
-	seq_putc(m, '\n');
+	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
+		   (void *)probe_ops->func);

 	return 0;
 }

 static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_func_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
-	struct ftrace_ops *ops = iter->ops;
 	struct dyn_ftrace *rec = NULL;

-	if (unlikely(ftrace_disabled))
-		return NULL;
-
-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_next(m, pos);
-
 	(*pos)++;
-	iter->pos = iter->func_pos = *pos;
-
-	if (iter->flags & FTRACE_ITER_PRINTALL)
-		return t_hash_start(m, pos);

 retry:
 	if (iter->idx >= iter->pg->index) {
@@ -3181,11 +3213,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
-		if (((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
-		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
+		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
+		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
 		     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -3196,24 +3225,51 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	}

 	if (!rec)
-		return t_hash_start(m, pos);
+		return NULL;

+	iter->pos = iter->func_pos = *pos;
 	iter->func = rec;

 	return iter;
 }

+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ftrace_iterator *iter = m->private;
+	loff_t l = *pos; /* t_hash_start() must use original pos */
+	void *ret;
+
+	if (unlikely(ftrace_disabled))
+		return NULL;
+
+	if (iter->flags & FTRACE_ITER_PROBE)
+		return t_probe_next(m, pos);
+
+	if (iter->flags & FTRACE_ITER_PRINTALL) {
+		/* next must increment pos, and t_probe_start does not */
+		(*pos)++;
+		return t_probe_start(m, &l);
+	}
+
+	ret = t_func_next(m, pos);
+
+	if (!ret)
+		return t_probe_start(m, &l);
+
+	return ret;
+}
 static void reset_iter_read(struct ftrace_iterator *iter)
 {
 	iter->pos = 0;
 	iter->func_pos = 0;
-	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
+	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE);
 }

 static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
-	struct ftrace_ops *ops = iter->ops;
 	void *p = NULL;
 	loff_t l;
@@ -3233,20 +3289,19 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 * off, we can short cut and just print out that all
 	 * functions are enabled.
 	 */
-	if ((iter->flags & FTRACE_ITER_FILTER &&
-	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
-	    (iter->flags & FTRACE_ITER_NOTRACE &&
-	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
+	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
+	    ftrace_hash_empty(iter->hash)) {
+		iter->func_pos = 1; /* Account for the message */
 		if (*pos > 0)
-			return t_hash_start(m, pos);
+			return t_probe_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
 		/* reset in case of seek/pread */
-		iter->flags &= ~FTRACE_ITER_HASH;
+		iter->flags &= ~FTRACE_ITER_PROBE;
 		return iter;
 	}

-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_start(m, pos);
+	if (iter->flags & FTRACE_ITER_PROBE)
+		return t_probe_start(m, pos);

 	/*
 	 * Unfortunately, we need to restart at ftrace_pages_start
@@ -3256,13 +3311,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	iter->pg = ftrace_pages_start;
 	iter->idx = 0;
 	for (l = 0; l <= *pos; ) {
-		p = t_next(m, p, &l);
+		p = t_func_next(m, &l);
 		if (!p)
 			break;
 	}

 	if (!p)
-		return t_hash_start(m, pos);
+		return t_probe_start(m, pos);

 	return iter;
 }
@@ -3293,8 +3348,8 @@ static int t_show(struct seq_file *m, void *v)
 	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec;

-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_show(m, iter);
+	if (iter->flags & FTRACE_ITER_PROBE)
+		return t_probe_show(m, iter);

 	if (iter->flags & FTRACE_ITER_PRINTALL) {
 		if (iter->flags & FTRACE_ITER_NOTRACE)
@@ -3355,12 +3410,13 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENODEV;

 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
-	if (iter) {
-		iter->pg = ftrace_pages_start;
-		iter->ops = &global_ops;
-	}
+	if (!iter)
+		return -ENOMEM;
+
+	iter->pg = ftrace_pages_start;
+	iter->ops = &global_ops;

-	return iter ? 0 : -ENOMEM;
+	return 0;
 }

 static int
@@ -3369,13 +3425,14 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 	struct ftrace_iterator *iter;

 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
-	if (iter) {
-		iter->pg = ftrace_pages_start;
-		iter->flags = FTRACE_ITER_ENABLED;
-		iter->ops = &global_ops;
-	}
+	if (!iter)
+		return -ENOMEM;
+
+	iter->pg = ftrace_pages_start;
+	iter->flags = FTRACE_ITER_ENABLED;
+	iter->ops = &global_ops;

-	return iter ? 0 : -ENOMEM;
+	return 0;
 }

 /**
@@ -3440,7 +3497,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 			ret = -ENOMEM;
 			goto out_unlock;
 		}
-	}
+	} else
+		iter->hash = hash;

 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
@@ -3470,7 +3528,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 	struct ftrace_ops *ops = inode->i_private;

 	return ftrace_regex_open(ops,
-			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
+			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
 			inode, file);
 }
@@ -3654,6 +3712,56 @@ ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
 	return match_records(hash, buff, len, NULL);
 }

+static void ftrace_ops_update_code(struct ftrace_ops *ops,
+				   struct ftrace_ops_hash *old_hash)
+{
+	struct ftrace_ops *op;
+
+	if (!ftrace_enabled)
+		return;
+
+	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+		return;
+	}
+
+	/*
+	 * If this is the shared global_ops filter, then we need to
+	 * check if there is another ops that shares it, is enabled.
+	 * If so, we still need to run the modify code.
+	 */
+	if (ops->func_hash != &global_ops.local_hash)
+		return;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->func_hash == &global_ops.local_hash &&
+		    op->flags & FTRACE_OPS_FL_ENABLED) {
+			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+			/* Only need to do this once */
+			return;
+		}
+	} while_for_each_ftrace_op(op);
+}
+
+static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
+					   struct ftrace_hash **orig_hash,
+					   struct ftrace_hash *hash,
+					   int enable)
+{
+	struct ftrace_ops_hash old_hash_ops;
+	struct ftrace_hash *old_hash;
+	int ret;
+
+	old_hash = *orig_hash;
+	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
+
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+	if (!ret) {
+		ftrace_ops_update_code(ops, &old_hash_ops);
+		free_ftrace_hash_rcu(old_hash);
+	}
+
+	return ret;
+}
+
 /*
  * We register the module command as a template to show others how
@@ -3661,7 +3769,7 @@ ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
  */
 static int
-ftrace_mod_callback(struct ftrace_hash *hash,
+ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
 		    char *func, char *cmd, char *module, int enable)
 {
 	int ret;
@@ -3695,16 +3803,11 @@ core_initcall(ftrace_mod_cmd_init);
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct ftrace_func_probe *entry;
-	struct hlist_head *hhd;
-	unsigned long key;
-
-	key = hash_long(ip, FTRACE_HASH_BITS);
-
-	hhd = &ftrace_func_hash[key];
-
-	if (hlist_empty(hhd))
-		return;
+	struct ftrace_probe_ops *probe_ops;
+	struct ftrace_func_probe *probe;
+
+	probe = container_of(op, struct ftrace_func_probe, ops);
+	probe_ops = probe->probe_ops;

 	/*
 	 * Disable preemption for these calls to prevent a RCU grace
@@ -3712,209 +3815,336 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
-		if (entry->ip == ip)
-			entry->ops->func(ip, parent_ip, &entry->data);
-	}
+	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
 	preempt_enable_notrace();
 }
-static struct ftrace_ops trace_probe_ops __read_mostly =
-{
-	.func		= function_trace_probe_call,
-	.flags		= FTRACE_OPS_FL_INITIALIZED,
-	INIT_OPS_HASH(trace_probe_ops)
-};
-
-static int ftrace_probe_registered;
-
-static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
-{
-	int ret;
-	int i;
-
-	if (ftrace_probe_registered) {
-		/* still need to update the function call sites */
-		if (ftrace_enabled)
-			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
-					       old_hash);
-		return;
-	}
-
-	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
-		struct hlist_head *hhd = &ftrace_func_hash[i];
-		if (hhd->first)
-			break;
-	}
-	/* Nothing registered? */
-	if (i == FTRACE_FUNC_HASHSIZE)
-		return;
-
-	ret = ftrace_startup(&trace_probe_ops, 0);
-
-	ftrace_probe_registered = 1;
-}
-
-static bool __disable_ftrace_function_probe(void)
-{
-	int i;
-
-	if (!ftrace_probe_registered)
-		return false;
-
-	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
-		struct hlist_head *hhd = &ftrace_func_hash[i];
-		if (hhd->first)
-			return false;
-	}
-
-	/* no more funcs left */
-	ftrace_shutdown(&trace_probe_ops, 0);
-
-	ftrace_probe_registered = 0;
-	return true;
-}
-
-static void ftrace_free_entry(struct ftrace_func_probe *entry)
-{
-	if (entry->ops->free)
-		entry->ops->free(entry->ops, entry->ip, &entry->data);
-	kfree(entry);
-}
+struct ftrace_func_map {
+	struct ftrace_func_entry	entry;
+	void				*data;
+};
+
+struct ftrace_func_mapper {
+	struct ftrace_hash		hash;
+};
+
+/**
+ * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
+ *
+ * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
+ */
+struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
+{
+	struct ftrace_hash *hash;
+
+	/*
+	 * The mapper is simply a ftrace_hash, but since the entries
+	 * in the hash are not ftrace_func_entry type, we define it
+	 * as a separate structure.
+	 */
+	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	return (struct ftrace_func_mapper *)hash;
+}
+
+/**
+ * ftrace_func_mapper_find_ip - Find some data mapped to an ip
+ * @mapper: The mapper that has the ip maps
+ * @ip: the instruction pointer to find the data for
+ *
+ * Returns the data mapped to @ip if found otherwise NULL. The return
+ * is actually the address of the mapper data pointer. The address is
+ * returned for use cases where the data is no bigger than a long, and
+ * the user can use the data pointer as its data instead of having to
+ * allocate more memory for the reference.
+ */
+void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
+				  unsigned long ip)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
+
+	entry = ftrace_lookup_ip(&mapper->hash, ip);
+	if (!entry)
+		return NULL;
+
+	map = (struct ftrace_func_map *)entry;
+	return &map->data;
+}
+
+/**
+ * ftrace_func_mapper_add_ip - Map some data to an ip
+ * @mapper: The mapper that has the ip maps
+ * @ip: The instruction pointer address to map @data to
+ * @data: The data to map to @ip
+ *
+ * Returns 0 on succes otherwise an error.
+ */
+int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
+			      unsigned long ip, void *data)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
+
+	entry = ftrace_lookup_ip(&mapper->hash, ip);
+	if (entry)
+		return -EBUSY;
+
+	map = kmalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+
+	map->entry.ip = ip;
+	map->data = data;
+
+	__add_hash_entry(&mapper->hash, &map->entry);
+
+	return 0;
+}
+
+/**
+ * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
+ * @mapper: The mapper that has the ip maps
+ * @ip: The instruction pointer address to remove the data from
+ *
+ * Returns the data if it is found, otherwise NULL.
+ * Note, if the data pointer is used as the data itself, (see
+ * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
+ * if the data pointer was set to zero.
+ */
+void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
+				   unsigned long ip)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
+	void *data;
+
+	entry = ftrace_lookup_ip(&mapper->hash, ip);
+	if (!entry)
+		return NULL;
+
+	map = (struct ftrace_func_map *)entry;
+	data = map->data;
+
+	remove_hash_entry(&mapper->hash, entry);
+	kfree(entry);
+
+	return data;
+}
+
+/**
+ * free_ftrace_func_mapper - free a mapping of ips and data
+ * @mapper: The mapper that has the ip maps
+ * @free_func: A function to be called on each data item.
+ *
+ * This is used to free the function mapper. The @free_func is optional
+ * and can be used if the data needs to be freed as well.
+ */
+void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
+			     ftrace_mapper_func free_func)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
+	struct hlist_head *hhd;
+	int size = 1 << mapper->hash.size_bits;
+	int i;
+
+	if (free_func && mapper->hash.count) {
+		for (i = 0; i < size; i++) {
+			hhd = &mapper->hash.buckets[i];
+			hlist_for_each_entry(entry, hhd, hlist) {
+				map = (struct ftrace_func_map *)entry;
+				free_func(map);
+			}
+		}
+	}
+	free_ftrace_hash(&mapper->hash);
+}
+
+static void release_probe(struct ftrace_func_probe *probe)
+{
+	struct ftrace_probe_ops *probe_ops;
+
+	mutex_lock(&ftrace_lock);
+
+	WARN_ON(probe->ref <= 0);
+
+	/* Subtract the ref that was used to protect this instance */
+	probe->ref--;
+
+	if (!probe->ref) {
+		probe_ops = probe->probe_ops;
+		/*
+		 * Sending zero as ip tells probe_ops to free
+		 * the probe->data itself
+		 */
+		if (probe_ops->free)
+			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
+		list_del(&probe->list);
+		kfree(probe);
+	}
+	mutex_unlock(&ftrace_lock);
+}
+
+static void acquire_probe_locked(struct ftrace_func_probe *probe)
+{
+	/*
+	 * Add one ref to keep it from being freed when releasing the
+	 * ftrace_lock mutex.
+	 */
+	probe->ref++;
+}
int int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, register_ftrace_function_probe(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops,
void *data) void *data)
{ {
struct ftrace_ops_hash old_hash_ops; struct ftrace_func_entry *entry;
struct ftrace_func_probe *entry; struct ftrace_func_probe *probe;
struct ftrace_glob func_g; struct ftrace_hash **orig_hash;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; struct ftrace_hash *old_hash;
struct ftrace_hash *old_hash = *orig_hash;
struct ftrace_hash *hash; struct ftrace_hash *hash;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
int not;
unsigned long key;
int count = 0; int count = 0;
int size;
int ret; int ret;
int i;
func_g.type = filter_parse_regex(glob, strlen(glob), if (WARN_ON(!tr))
&func_g.search, &not);
func_g.len = strlen(func_g.search);
/* we do not support '!' for function probes */
if (WARN_ON(not))
return -EINVAL; return -EINVAL;
mutex_lock(&trace_probe_ops.func_hash->regex_lock); /* We do not support '!' for function probes */
if (WARN_ON(glob[0] == '!'))
return -EINVAL;
old_hash_ops.filter_hash = old_hash;
/* Probes only have filters */
old_hash_ops.notrace_hash = NULL;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); mutex_lock(&ftrace_lock);
if (!hash) { /* Check if the probe_ops is already registered */
count = -ENOMEM; list_for_each_entry(probe, &tr->func_probes, list) {
goto out; if (probe->probe_ops == probe_ops)
break;
} }
if (&probe->list == &tr->func_probes) {
if (unlikely(ftrace_disabled)) { probe = kzalloc(sizeof(*probe), GFP_KERNEL);
count = -ENODEV; if (!probe) {
goto out; mutex_unlock(&ftrace_lock);
return -ENOMEM;
}
probe->probe_ops = probe_ops;
probe->ops.func = function_trace_probe_call;
probe->tr = tr;
ftrace_ops_init(&probe->ops);
list_add(&probe->list, &tr->func_probes);
} }
mutex_lock(&ftrace_lock); acquire_probe_locked(probe);
do_for_each_ftrace_rec(pg, rec) { mutex_unlock(&ftrace_lock);
if (rec->flags & FTRACE_FL_DISABLED) mutex_lock(&probe->ops.func_hash->regex_lock);
continue;
if (!ftrace_match_record(rec, &func_g, NULL, 0)) orig_hash = &probe->ops.func_hash->filter_hash;
continue; old_hash = *orig_hash;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
entry = kmalloc(sizeof(*entry), GFP_KERNEL); ret = ftrace_match_records(hash, glob, strlen(glob));
if (!entry) {
/* If we did not process any, then return error */
if (!count)
count = -ENOMEM;
goto out_unlock;
}
count++; /* Nothing found? */
if (!ret)
ret = -EINVAL;
entry->data = data; if (ret < 0)
goto out;
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
if (ftrace_lookup_ip(old_hash, entry->ip))
continue;
/* /*
* The caller might want to do something special * The caller might want to do something special
* for each function we find. We call the callback * for each function we find. We call the callback
* to give the caller an opportunity to do so. * to give the caller an opportunity to do so.
*/ */
if (ops->init) { if (probe_ops->init) {
if (ops->init(ops, rec->ip, &entry->data) < 0) { ret = probe_ops->init(probe_ops, tr,
/* caller does not like this func */ entry->ip, data,
kfree(entry); &probe->data);
continue; if (ret < 0) {
if (probe_ops->free && count)
probe_ops->free(probe_ops, tr,
0, probe->data);
probe->data = NULL;
goto out;
} }
} }
count++;
ret = enter_record(hash, rec, 0); }
if (ret < 0) {
kfree(entry);
count = ret;
goto out_unlock;
} }
entry->ops = ops; mutex_lock(&ftrace_lock);
entry->ip = rec->ip;
key = hash_long(entry->ip, FTRACE_HASH_BITS);
hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
} while_for_each_ftrace_rec(); if (!count) {
/* Nothing was added? */
ret = -EINVAL;
goto out_unlock;
}
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
hash, 1);
if (ret < 0)
goto err_unlock;
__enable_ftrace_function_probe(&old_hash_ops); /* One ref for each new function traced */
probe->ref += count;
if (!ret) if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
free_ftrace_hash_rcu(old_hash); ret = ftrace_startup(&probe->ops, 0);
else
count = ret;
out_unlock: out_unlock:
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
if (!ret)
ret = count;
out: out:
mutex_unlock(&trace_probe_ops.func_hash->regex_lock); mutex_unlock(&probe->ops.func_hash->regex_lock);
free_ftrace_hash(hash); free_ftrace_hash(hash);
return count; release_probe(probe);
}
enum { return ret;
PROBE_TEST_FUNC = 1,
PROBE_TEST_DATA = 2
};
static void err_unlock:
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, if (!probe_ops->free || !count)
void *data, int flags) goto out_unlock;
/* Failed to do the move, need to call the free functions */
for (i = 0; i < size; i++) {
hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
if (ftrace_lookup_ip(old_hash, entry->ip))
continue;
probe_ops->free(probe_ops, tr, entry->ip, probe->data);
}
}
goto out_unlock;
}
int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops)
{ {
struct ftrace_ops_hash old_hash_ops; struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *rec_entry; struct ftrace_func_entry *entry;
struct ftrace_func_probe *entry; struct ftrace_func_probe *probe;
struct ftrace_func_probe *p;
struct ftrace_glob func_g; struct ftrace_glob func_g;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash = *orig_hash; struct ftrace_hash *old_hash;
struct list_head free_list; struct ftrace_hash *hash = NULL;
struct ftrace_hash *hash;
struct hlist_node *tmp; struct hlist_node *tmp;
struct hlist_head hhd;
char str[KSYM_SYMBOL_LEN]; char str[KSYM_SYMBOL_LEN];
int i, ret; int count = 0;
bool disabled; int i, ret = -ENODEV;
int size;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
func_g.search = NULL; func_g.search = NULL;
...@@ -3928,95 +4158,104 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ...@@ -3928,95 +4158,104 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
/* we do not support '!' for function probes */ /* we do not support '!' for function probes */
if (WARN_ON(not)) if (WARN_ON(not))
return; return -EINVAL;
} }
mutex_lock(&trace_probe_ops.func_hash->regex_lock); mutex_lock(&ftrace_lock);
/* Check if the probe_ops is already registered */
list_for_each_entry(probe, &tr->func_probes, list) {
if (probe->probe_ops == probe_ops)
break;
}
if (&probe->list == &tr->func_probes)
goto err_unlock_ftrace;
ret = -EINVAL;
if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
goto err_unlock_ftrace;
acquire_probe_locked(probe);
mutex_unlock(&ftrace_lock);
mutex_lock(&probe->ops.func_hash->regex_lock);
orig_hash = &probe->ops.func_hash->filter_hash;
old_hash = *orig_hash;
if (ftrace_hash_empty(old_hash))
goto out_unlock;
old_hash_ops.filter_hash = old_hash; old_hash_ops.filter_hash = old_hash;
/* Probes only have filters */ /* Probes only have filters */
old_hash_ops.notrace_hash = NULL; old_hash_ops.notrace_hash = NULL;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); ret = -ENOMEM;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
if (!hash) if (!hash)
/* Hmm, should report this somehow */
goto out_unlock; goto out_unlock;
INIT_LIST_HEAD(&free_list); INIT_HLIST_HEAD(&hhd);
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
hlist_for_each_entry_safe(entry, tmp, hhd, node) {
/* break up if statements for readability */ size = 1 << hash->size_bits;
if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) for (i = 0; i < size; i++) {
continue; hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
if ((flags & PROBE_TEST_DATA) && entry->data != data)
continue;
/* do this last, since it is the most expensive */
if (func_g.search) { if (func_g.search) {
kallsyms_lookup(entry->ip, NULL, NULL, kallsyms_lookup(entry->ip, NULL, NULL,
NULL, str); NULL, str);
if (!ftrace_match(str, &func_g)) if (!ftrace_match(str, &func_g))
continue; continue;
} }
count++;
rec_entry = ftrace_lookup_ip(hash, entry->ip); remove_hash_entry(hash, entry);
/* It is possible more than one entry had this ip */ hlist_add_head(&entry->hlist, &hhd);
if (rec_entry)
free_hash_entry(hash, rec_entry);
hlist_del_rcu(&entry->node);
list_add(&entry->free_list, &free_list);
} }
} }
/* Nothing found? */
if (!count) {
ret = -EINVAL;
goto out_unlock;
}
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
disabled = __disable_ftrace_function_probe();
/* WARN_ON(probe->ref < count);
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*. probe->ref -= count;
*/
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); if (ftrace_hash_empty(hash))
ftrace_shutdown(&probe->ops, 0);
ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
hash, 1);
/* still need to update the function call sites */ /* still need to update the function call sites */
if (ftrace_enabled && !disabled) if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops); &old_hash_ops);
synchronize_sched(); synchronize_sched();
if (!ret)
free_ftrace_hash_rcu(old_hash);
list_for_each_entry_safe(entry, p, &free_list, free_list) { hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
list_del(&entry->free_list); hlist_del(&entry->hlist);
ftrace_free_entry(entry); if (probe_ops->free)
probe_ops->free(probe_ops, tr, entry->ip, probe->data);
kfree(entry);
} }
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
out_unlock: out_unlock:
mutex_unlock(&trace_probe_ops.func_hash->regex_lock); mutex_unlock(&probe->ops.func_hash->regex_lock);
free_ftrace_hash(hash); free_ftrace_hash(hash);
}
void release_probe(probe);
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
__unregister_ftrace_function_probe(glob, ops, data,
PROBE_TEST_FUNC | PROBE_TEST_DATA);
}
void return ret;
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
void unregister_ftrace_function_probe_all(char *glob) err_unlock_ftrace:
{ mutex_unlock(&ftrace_lock);
__unregister_ftrace_function_probe(glob, NULL, NULL, 0); return ret;
} }
static LIST_HEAD(ftrace_commands); static LIST_HEAD(ftrace_commands);
...@@ -4068,9 +4307,11 @@ __init int unregister_ftrace_command(struct ftrace_func_command *cmd) ...@@ -4068,9 +4307,11 @@ __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
return ret; return ret;
} }
static int ftrace_process_regex(struct ftrace_hash *hash, static int ftrace_process_regex(struct ftrace_iterator *iter,
char *buff, int len, int enable) char *buff, int len, int enable)
{ {
struct ftrace_hash *hash = iter->hash;
struct trace_array *tr = iter->ops->private;
char *func, *command, *next = buff; char *func, *command, *next = buff;
struct ftrace_func_command *p; struct ftrace_func_command *p;
int ret = -EINVAL; int ret = -EINVAL;
...@@ -4090,10 +4331,13 @@ static int ftrace_process_regex(struct ftrace_hash *hash, ...@@ -4090,10 +4331,13 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
command = strsep(&next, ":"); command = strsep(&next, ":");
if (WARN_ON_ONCE(!tr))
return -EINVAL;
mutex_lock(&ftrace_cmd_mutex); mutex_lock(&ftrace_cmd_mutex);
list_for_each_entry(p, &ftrace_commands, list) { list_for_each_entry(p, &ftrace_commands, list) {
if (strcmp(p->name, command) == 0) { if (strcmp(p->name, command) == 0) {
ret = p->func(hash, func, command, next, enable); ret = p->func(tr, hash, func, command, next, enable);
goto out_unlock; goto out_unlock;
} }
} }
...@@ -4130,7 +4374,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, ...@@ -4130,7 +4374,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
if (read >= 0 && trace_parser_loaded(parser) && if (read >= 0 && trace_parser_loaded(parser) &&
!trace_parser_cont(parser)) { !trace_parser_cont(parser)) {
ret = ftrace_process_regex(iter->hash, parser->buffer, ret = ftrace_process_regex(iter, parser->buffer,
parser->idx, enable); parser->idx, enable);
trace_parser_clear(parser); trace_parser_clear(parser);
if (ret < 0) if (ret < 0)
...@@ -4175,44 +4419,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) ...@@ -4175,44 +4419,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return add_hash_entry(hash, ip); return add_hash_entry(hash, ip);
} }
static void ftrace_ops_update_code(struct ftrace_ops *ops,
struct ftrace_ops_hash *old_hash)
{
struct ftrace_ops *op;
if (!ftrace_enabled)
return;
if (ops->flags & FTRACE_OPS_FL_ENABLED) {
ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
return;
}
/*
* If this is the shared global_ops filter, then we need to
* check if there is another ops that shares it, is enabled.
* If so, we still need to run the modify code.
*/
if (ops->func_hash != &global_ops.local_hash)
return;
do_for_each_ftrace_op(op, ftrace_ops_list) {
if (op->func_hash == &global_ops.local_hash &&
op->flags & FTRACE_OPS_FL_ENABLED) {
ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
/* Only need to do this once */
return;
}
} while_for_each_ftrace_op(op);
}
static int static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable) unsigned long ip, int remove, int reset, int enable)
{ {
struct ftrace_hash **orig_hash; struct ftrace_hash **orig_hash;
struct ftrace_ops_hash old_hash_ops;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash; struct ftrace_hash *hash;
int ret; int ret;
...@@ -4247,14 +4458,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ...@@ -4247,14 +4458,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
} }
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
old_hash = *orig_hash; ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
old_hash_ops.filter_hash = ops->func_hash->filter_hash;
old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
if (!ret) {
ftrace_ops_update_code(ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
out_regex_unlock: out_regex_unlock:
...@@ -4493,10 +4697,8 @@ static void __init set_ftrace_early_filters(void) ...@@ -4493,10 +4697,8 @@ static void __init set_ftrace_early_filters(void)
int ftrace_regex_release(struct inode *inode, struct file *file) int ftrace_regex_release(struct inode *inode, struct file *file)
{ {
struct seq_file *m = (struct seq_file *)file->private_data; struct seq_file *m = (struct seq_file *)file->private_data;
struct ftrace_ops_hash old_hash_ops;
struct ftrace_iterator *iter; struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash; struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
struct trace_parser *parser; struct trace_parser *parser;
int filter_hash; int filter_hash;
int ret; int ret;
...@@ -4526,16 +4728,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) ...@@ -4526,16 +4728,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
orig_hash = &iter->ops->func_hash->notrace_hash; orig_hash = &iter->ops->func_hash->notrace_hash;
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
old_hash = *orig_hash; ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; iter->hash, filter_hash);
old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
if (!ret) {
ftrace_ops_update_code(iter->ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
} else {
/* For read only, the hash is the ops hash */
iter->hash = NULL;
} }
mutex_unlock(&iter->ops->func_hash->regex_lock); mutex_unlock(&iter->ops->func_hash->regex_lock);
...@@ -5274,6 +5472,50 @@ void ftrace_module_init(struct module *mod) ...@@ -5274,6 +5472,50 @@ void ftrace_module_init(struct module *mod)
} }
#endif /* CONFIG_MODULES */ #endif /* CONFIG_MODULES */
void __init ftrace_free_init_mem(void)
{
unsigned long start = (unsigned long)(&__init_begin);
unsigned long end = (unsigned long)(&__init_end);
struct ftrace_page **last_pg = &ftrace_pages_start;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
struct dyn_ftrace key;
int order;
key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */
mutex_lock(&ftrace_lock);
for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
if (end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
again:
rec = bsearch(&key, pg->records, pg->index,
sizeof(struct dyn_ftrace),
ftrace_cmp_recs);
if (!rec)
continue;
pg->index--;
if (!pg->index) {
*last_pg = pg->next;
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
free_pages((unsigned long)pg->records, order);
kfree(pg);
pg = container_of(last_pg, struct ftrace_page, next);
if (!(*last_pg))
ftrace_pages = pg;
continue;
}
memmove(rec, rec + 1,
(pg->index - (rec - pg->records)) * sizeof(*rec));
/* More than one function may be in this block */
goto again;
}
mutex_unlock(&ftrace_lock);
}
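
The new ftrace_free_init_mem() walks the mcount record pages and drops every record whose address falls inside the freed __init text. It does this by packing the range into a search key: key.ip carries the start and key.flags is overloaded with the end address, so the comparator handed to bsearch() must treat the key as a range rather than a single ip. A sketch of that range match follows; ftrace_cmp_recs() itself is defined elsewhere in ftrace.c, so its exact bounds checks are an assumption here, not something this hunk shows.

static int init_range_cmp(const void *a, const void *b)
{
	/* Key: ->ip is the range start, ->flags was overloaded with the end */
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)			/* range ends before this record */
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)	/* range starts after this record */
		return 1;
	return 0;					/* record lies inside [start, end) */
}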
void __init ftrace_init(void) void __init ftrace_init(void)
{ {
extern unsigned long __start_mcount_loc[]; extern unsigned long __start_mcount_loc[];
...@@ -5316,25 +5558,13 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) ...@@ -5316,25 +5558,13 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
static void ftrace_update_trampoline(struct ftrace_ops *ops) static void ftrace_update_trampoline(struct ftrace_ops *ops)
{ {
/*
* Currently there's no safe way to free a trampoline when the kernel
* is configured with PREEMPT. That is because a task could be preempted
* when it jumped to the trampoline, it may be preempted for a long time
* depending on the system load, and currently there's no way to know
* when it will be off the trampoline. If the trampoline is freed
* too early, when the task runs again, it will be executing on freed
* memory and crash.
*/
#ifdef CONFIG_PREEMPT
/* Currently, only non dynamic ops can have a trampoline */
if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
return;
#endif
arch_ftrace_update_trampoline(ops); arch_ftrace_update_trampoline(ops);
} }
void ftrace_init_trace_array(struct trace_array *tr)
{
INIT_LIST_HEAD(&tr->func_probes);
}
#else #else
static struct ftrace_ops global_ops = { static struct ftrace_ops global_ops = {
...@@ -5389,6 +5619,7 @@ __init void ftrace_init_global_array_ops(struct trace_array *tr) ...@@ -5389,6 +5619,7 @@ __init void ftrace_init_global_array_ops(struct trace_array *tr)
{ {
tr->ops = &global_ops; tr->ops = &global_ops;
tr->ops->private = tr; tr->ops->private = tr;
ftrace_init_trace_array(tr);
} }
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
...@@ -5543,6 +5774,43 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, ...@@ -5543,6 +5774,43 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
trace_ignore_this_task(pid_list, next)); trace_ignore_this_task(pid_list, next));
} }
static void
ftrace_pid_follow_sched_process_fork(void *data,
struct task_struct *self,
struct task_struct *task)
{
struct trace_pid_list *pid_list;
struct trace_array *tr = data;
pid_list = rcu_dereference_sched(tr->function_pids);
trace_filter_add_remove_task(pid_list, self, task);
}
static void
ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
{
struct trace_pid_list *pid_list;
struct trace_array *tr = data;
pid_list = rcu_dereference_sched(tr->function_pids);
trace_filter_add_remove_task(pid_list, NULL, task);
}
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
{
if (enable) {
register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
tr);
register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
tr);
} else {
unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
tr);
unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
tr);
}
}
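
ftrace_pid_follow_fork() backs the new per-instance "function-fork" option: when the option is set it attaches to the sched_process_fork and sched_process_exit tracepoints so that children of tasks listed in set_ftrace_pid are added to tr->function_pids and exiting tasks are removed. The helper it calls, trace_filter_add_remove_task(), lives in trace.c; the sketch below only illustrates the behaviour these callbacks rely on, with the pid-list internals (the pid_max bound and pids bitmap) assumed rather than taken from this hunk.

static void pid_follow_sketch(struct trace_pid_list *pid_list,
			      struct task_struct *self,
			      struct task_struct *task)
{
	if (!pid_list)
		return;

	/* Fork path: only inherit the filter if the parent is already listed */
	if (self && !trace_find_filtered_pid(pid_list, self->pid))
		return;

	if (task->pid >= pid_list->pid_max)	/* assumed bound on the bitmap */
		return;

	if (self)
		set_bit(task->pid, pid_list->pids);	/* fork: add the child */
	else
		clear_bit(task->pid, pid_list->pids);	/* exit: drop the task */
}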
static void clear_ftrace_pids(struct trace_array *tr) static void clear_ftrace_pids(struct trace_array *tr)
{ {
struct trace_pid_list *pid_list; struct trace_pid_list *pid_list;
......
...@@ -438,6 +438,7 @@ struct ring_buffer_per_cpu { ...@@ -438,6 +438,7 @@ struct ring_buffer_per_cpu {
raw_spinlock_t reader_lock; /* serialize readers */ raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock; arch_spinlock_t lock;
struct lock_class_key lock_key; struct lock_class_key lock_key;
struct buffer_data_page *free_page;
unsigned long nr_pages; unsigned long nr_pages;
unsigned int current_context; unsigned int current_context;
struct list_head *pages; struct list_head *pages;
...@@ -4389,9 +4390,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); ...@@ -4389,9 +4390,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
*/ */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{ {
struct buffer_data_page *bpage; struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct buffer_data_page *bpage = NULL;
unsigned long flags;
struct page *page; struct page *page;
local_irq_save(flags);
arch_spin_lock(&cpu_buffer->lock);
if (cpu_buffer->free_page) {
bpage = cpu_buffer->free_page;
cpu_buffer->free_page = NULL;
}
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
if (bpage)
goto out;
page = alloc_pages_node(cpu_to_node(cpu), page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0); GFP_KERNEL | __GFP_NORETRY, 0);
if (!page) if (!page)
...@@ -4399,6 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) ...@@ -4399,6 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
bpage = page_address(page); bpage = page_address(page);
out:
rb_init_page(bpage); rb_init_page(bpage);
return bpage; return bpage;
...@@ -4408,13 +4426,29 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); ...@@ -4408,13 +4426,29 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
/** /**
* ring_buffer_free_read_page - free an allocated read page * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for  * @buffer: the buffer the page was allocated for
* @cpu: the cpu buffer the page came from
* @data: the page to free * @data: the page to free
* *
* Free a page allocated from ring_buffer_alloc_read_page. * Free a page allocated from ring_buffer_alloc_read_page.
*/ */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
{ {
free_page((unsigned long)data); struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct buffer_data_page *bpage = data;
unsigned long flags;
local_irq_save(flags);
arch_spin_lock(&cpu_buffer->lock);
if (!cpu_buffer->free_page) {
cpu_buffer->free_page = bpage;
bpage = NULL;
}
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
free_page((unsigned long)bpage);
} }
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
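
With the per-cpu free_page slot added above, a reader page handed back through ring_buffer_free_read_page() is parked on the cpu buffer and returned by the next ring_buffer_alloc_read_page() call for that CPU, so steady-state readers stop hitting the page allocator. A minimal caller sketch under the updated API (error handling trimmed; the len and full arguments follow the existing callers in this diff):

static void read_one_cpu_page(struct ring_buffer *buffer, int cpu)
{
	void *page = ring_buffer_alloc_read_page(buffer, cpu);

	if (!page)
		return;

	/* full = 1: only hand back fully written pages */
	if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1) >= 0) {
		/* consume the events copied into 'page' here */
	}

	/* Goes back into cpu_buffer->free_page unless one is already cached */
	ring_buffer_free_read_page(buffer, cpu, page);
}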
......
...@@ -171,7 +171,7 @@ static enum event_status read_page(int cpu) ...@@ -171,7 +171,7 @@ static enum event_status read_page(int cpu)
} }
} }
} }
ring_buffer_free_read_page(buffer, bpage); ring_buffer_free_read_page(buffer, cpu, bpage);
if (ret < 0) if (ret < 0)
return EVENT_DROPPED; return EVENT_DROPPED;
......
...@@ -257,7 +257,7 @@ unsigned long long ns2usecs(u64 nsec) ...@@ -257,7 +257,7 @@ unsigned long long ns2usecs(u64 nsec)
/* trace_flags that are default zero for instances */ /* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \ #define ZEROED_TRACE_FLAGS \
TRACE_ITER_EVENT_FORK (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/* /*
* The global_trace is the descriptor that holds the top-level tracing * The global_trace is the descriptor that holds the top-level tracing
...@@ -757,7 +757,7 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer, ...@@ -757,7 +757,7 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
return event; return event;
} }
static void tracer_tracing_on(struct trace_array *tr) void tracer_tracing_on(struct trace_array *tr)
{ {
if (tr->trace_buffer.buffer) if (tr->trace_buffer.buffer)
ring_buffer_record_on(tr->trace_buffer.buffer); ring_buffer_record_on(tr->trace_buffer.buffer);
...@@ -894,23 +894,8 @@ int __trace_bputs(unsigned long ip, const char *str) ...@@ -894,23 +894,8 @@ int __trace_bputs(unsigned long ip, const char *str)
EXPORT_SYMBOL_GPL(__trace_bputs); EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT #ifdef CONFIG_TRACER_SNAPSHOT
/** static void tracing_snapshot_instance(struct trace_array *tr)
* trace_snapshot - take a snapshot of the current buffer.
*
* This causes a swap between the snapshot buffer and the current live
* tracing buffer. You can use this to take snapshots of the live
* trace when some condition is triggered, but continue to trace.
*
* Note, make sure to allocate the snapshot with either
* a tracing_snapshot_alloc(), or by doing it manually
* with: echo 1 > /sys/kernel/debug/tracing/snapshot
*
* If the snapshot buffer is not allocated, it will stop tracing.
* Basically making a permanent snapshot.
*/
void tracing_snapshot(void)
{ {
struct trace_array *tr = &global_trace;
struct tracer *tracer = tr->current_trace; struct tracer *tracer = tr->current_trace;
unsigned long flags; unsigned long flags;
...@@ -938,6 +923,27 @@ void tracing_snapshot(void) ...@@ -938,6 +923,27 @@ void tracing_snapshot(void)
update_max_tr(tr, current, smp_processor_id()); update_max_tr(tr, current, smp_processor_id());
local_irq_restore(flags); local_irq_restore(flags);
} }
/**
* trace_snapshot - take a snapshot of the current buffer.
*
* This causes a swap between the snapshot buffer and the current live
* tracing buffer. You can use this to take snapshots of the live
* trace when some condition is triggered, but continue to trace.
*
* Note, make sure to allocate the snapshot with either
* a tracing_snapshot_alloc(), or by doing it manually
* with: echo 1 > /sys/kernel/debug/tracing/snapshot
*
* If the snapshot buffer is not allocated, it will stop tracing.
* Basically making a permanent snapshot.
*/
void tracing_snapshot(void)
{
struct trace_array *tr = &global_trace;
tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot); EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
...@@ -1039,7 +1045,7 @@ void tracing_snapshot_alloc(void) ...@@ -1039,7 +1045,7 @@ void tracing_snapshot_alloc(void)
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */ #endif /* CONFIG_TRACER_SNAPSHOT */
static void tracer_tracing_off(struct trace_array *tr) void tracer_tracing_off(struct trace_array *tr)
{ {
if (tr->trace_buffer.buffer) if (tr->trace_buffer.buffer)
ring_buffer_record_off(tr->trace_buffer.buffer); ring_buffer_record_off(tr->trace_buffer.buffer);
...@@ -1424,6 +1430,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full) ...@@ -1424,6 +1430,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full)
} }
#ifdef CONFIG_FTRACE_STARTUP_TEST #ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;
struct trace_selftests {
struct list_head list;
struct tracer *type;
};
static LIST_HEAD(postponed_selftests);
static int save_selftest(struct tracer *type)
{
struct trace_selftests *selftest;
selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
if (!selftest)
return -ENOMEM;
selftest->type = type;
list_add(&selftest->list, &postponed_selftests);
return 0;
}
static int run_tracer_selftest(struct tracer *type) static int run_tracer_selftest(struct tracer *type)
{ {
struct trace_array *tr = &global_trace; struct trace_array *tr = &global_trace;
...@@ -1433,6 +1461,14 @@ static int run_tracer_selftest(struct tracer *type) ...@@ -1433,6 +1461,14 @@ static int run_tracer_selftest(struct tracer *type)
if (!type->selftest || tracing_selftest_disabled) if (!type->selftest || tracing_selftest_disabled)
return 0; return 0;
/*
* If a tracer registers early in boot up (before scheduling is
* initialized and such), then do not run its selftests yet.
* Instead, run it a little later in the boot process.
*/
if (!selftests_can_run)
return save_selftest(type);
/* /*
* Run a selftest on this tracer. * Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current * Here we reset the trace buffer, and set the current
...@@ -1482,6 +1518,47 @@ static int run_tracer_selftest(struct tracer *type) ...@@ -1482,6 +1518,47 @@ static int run_tracer_selftest(struct tracer *type)
printk(KERN_CONT "PASSED\n"); printk(KERN_CONT "PASSED\n");
return 0; return 0;
} }
static __init int init_trace_selftests(void)
{
struct trace_selftests *p, *n;
struct tracer *t, **last;
int ret;
selftests_can_run = true;
mutex_lock(&trace_types_lock);
if (list_empty(&postponed_selftests))
goto out;
pr_info("Running postponed tracer tests:\n");
list_for_each_entry_safe(p, n, &postponed_selftests, list) {
ret = run_tracer_selftest(p->type);
/* If the test fails, then warn and remove from available_tracers */
if (ret < 0) {
WARN(1, "tracer: %s failed selftest, disabling\n",
p->type->name);
last = &trace_types;
for (t = trace_types; t; t = t->next) {
if (t == p->type) {
*last = t->next;
break;
}
last = &t->next;
}
}
list_del(&p->list);
kfree(p);
}
out:
mutex_unlock(&trace_types_lock);
return 0;
}
early_initcall(init_trace_selftests);
#else #else
static inline int run_tracer_selftest(struct tracer *type) static inline int run_tracer_selftest(struct tracer *type)
{ {
...@@ -1927,6 +2004,18 @@ void tracing_record_cmdline(struct task_struct *tsk) ...@@ -1927,6 +2004,18 @@ void tracing_record_cmdline(struct task_struct *tsk)
__this_cpu_write(trace_cmdline_save, false); __this_cpu_write(trace_cmdline_save, false);
} }
/*
* Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
* overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
* simplifies those functions and keeps them in sync.
*/
enum print_line_t trace_handle_return(struct trace_seq *s)
{
return trace_seq_has_overflowed(s) ?
TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
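
As the comment above says, trace_handle_return() gives output handlers a single place to turn trace_seq overflow into the right print_line_t value. A typical print handler just fills the trace_seq and lets the helper pick the return code; the handler below is a placeholder sketch, not code from this series:

static enum print_line_t my_event_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "my_event: example output\n");

	/* TRACE_TYPE_PARTIAL_LINE if 's' overflowed, TRACE_TYPE_HANDLED otherwise */
	return trace_handle_return(s);
}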
void void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
int pc) int pc)
...@@ -4122,6 +4211,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) ...@@ -4122,6 +4211,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (mask == TRACE_ITER_EVENT_FORK) if (mask == TRACE_ITER_EVENT_FORK)
trace_event_follow_fork(tr, enabled); trace_event_follow_fork(tr, enabled);
if (mask == TRACE_ITER_FUNC_FORK)
ftrace_pid_follow_fork(tr, enabled);
if (mask == TRACE_ITER_OVERWRITE) { if (mask == TRACE_ITER_OVERWRITE) {
ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
...@@ -5962,6 +6054,7 @@ static int tracing_clock_open(struct inode *inode, struct file *file) ...@@ -5962,6 +6054,7 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
struct ftrace_buffer_info { struct ftrace_buffer_info {
struct trace_iterator iter; struct trace_iterator iter;
void *spare; void *spare;
unsigned int spare_cpu;
unsigned int read; unsigned int read;
}; };
...@@ -6291,9 +6384,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, ...@@ -6291,9 +6384,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
return -EBUSY; return -EBUSY;
#endif #endif
if (!info->spare) if (!info->spare) {
info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
iter->cpu_file); iter->cpu_file);
info->spare_cpu = iter->cpu_file;
}
if (!info->spare) if (!info->spare)
return -ENOMEM; return -ENOMEM;
...@@ -6353,7 +6448,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) ...@@ -6353,7 +6448,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
__trace_array_put(iter->tr); __trace_array_put(iter->tr);
if (info->spare) if (info->spare)
ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); ring_buffer_free_read_page(iter->trace_buffer->buffer,
info->spare_cpu, info->spare);
kfree(info); kfree(info);
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
...@@ -6364,6 +6460,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) ...@@ -6364,6 +6460,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
struct buffer_ref { struct buffer_ref {
struct ring_buffer *buffer; struct ring_buffer *buffer;
void *page; void *page;
int cpu;
int ref; int ref;
}; };
...@@ -6375,7 +6472,7 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, ...@@ -6375,7 +6472,7 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
if (--ref->ref) if (--ref->ref)
return; return;
ring_buffer_free_read_page(ref->buffer, ref->page); ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
kfree(ref); kfree(ref);
buf->private = 0; buf->private = 0;
} }
...@@ -6409,7 +6506,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) ...@@ -6409,7 +6506,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
if (--ref->ref) if (--ref->ref)
return; return;
ring_buffer_free_read_page(ref->buffer, ref->page); ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
kfree(ref); kfree(ref);
spd->partial[i].private = 0; spd->partial[i].private = 0;
} }
...@@ -6473,11 +6570,13 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, ...@@ -6473,11 +6570,13 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
kfree(ref); kfree(ref);
break; break;
} }
ref->cpu = iter->cpu_file;
r = ring_buffer_read_page(ref->buffer, &ref->page, r = ring_buffer_read_page(ref->buffer, &ref->page,
len, iter->cpu_file, 1); len, iter->cpu_file, 1);
if (r < 0) { if (r < 0) {
ring_buffer_free_read_page(ref->buffer, ref->page); ring_buffer_free_read_page(ref->buffer, ref->cpu,
ref->page);
kfree(ref); kfree(ref);
break; break;
} }
...@@ -6648,43 +6747,89 @@ static const struct file_operations tracing_dyn_info_fops = { ...@@ -6648,43 +6747,89 @@ static const struct file_operations tracing_dyn_info_fops = {
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data) ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
tracing_snapshot(); tracing_snapshot_instance(tr);
} }
static void static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data) ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
unsigned long *count = (long *)data; struct ftrace_func_mapper *mapper = data;
long *count = NULL;
if (mapper)
count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
if (count) {
if (!*count) if (*count <= 0)
return; return;
if (*count != -1)
(*count)--; (*count)--;
}
tracing_snapshot(); tracing_snapshot_instance(tr);
} }
static int static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip, ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *ops, void *data)
{ {
long count = (long)data; struct ftrace_func_mapper *mapper = data;
long *count = NULL;
seq_printf(m, "%ps:", (void *)ip); seq_printf(m, "%ps:", (void *)ip);
seq_puts(m, "snapshot"); seq_puts(m, "snapshot");
if (count == -1) if (mapper)
seq_puts(m, ":unlimited\n"); count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
if (count)
seq_printf(m, ":count=%ld\n", *count);
else else
seq_printf(m, ":count=%ld\n", count); seq_puts(m, ":unlimited\n");
return 0; return 0;
} }
static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
unsigned long ip, void *init_data, void **data)
{
struct ftrace_func_mapper *mapper = *data;
if (!mapper) {
mapper = allocate_ftrace_func_mapper();
if (!mapper)
return -ENOMEM;
*data = mapper;
}
return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
unsigned long ip, void *data)
{
struct ftrace_func_mapper *mapper = data;
if (!ip) {
if (!mapper)
return;
free_ftrace_func_mapper(mapper, NULL);
return;
}
ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops snapshot_probe_ops = { static struct ftrace_probe_ops snapshot_probe_ops = {
.func = ftrace_snapshot, .func = ftrace_snapshot,
.print = ftrace_snapshot_print, .print = ftrace_snapshot_print,
...@@ -6693,10 +6838,12 @@ static struct ftrace_probe_ops snapshot_probe_ops = { ...@@ -6693,10 +6838,12 @@ static struct ftrace_probe_ops snapshot_probe_ops = {
static struct ftrace_probe_ops snapshot_count_probe_ops = { static struct ftrace_probe_ops snapshot_count_probe_ops = {
.func = ftrace_count_snapshot, .func = ftrace_count_snapshot,
.print = ftrace_snapshot_print, .print = ftrace_snapshot_print,
.init = ftrace_snapshot_init,
.free = ftrace_snapshot_free,
}; };
static int static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash, ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable) char *glob, char *cmd, char *param, int enable)
{ {
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
...@@ -6710,10 +6857,8 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, ...@@ -6710,10 +6857,8 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
if (glob[0] == '!') { if (glob[0] == '!')
unregister_ftrace_function_probe_func(glob+1, ops); return unregister_ftrace_function_probe_func(glob+1, tr, ops);
return 0;
}
if (!param) if (!param)
goto out_reg; goto out_reg;
...@@ -6732,11 +6877,11 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, ...@@ -6732,11 +6877,11 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
return ret; return ret;
out_reg: out_reg:
ret = alloc_snapshot(&global_trace); ret = alloc_snapshot(tr);
if (ret < 0) if (ret < 0)
goto out; goto out;
ret = register_ftrace_function_probe(glob, ops, count); ret = register_ftrace_function_probe(glob, tr, ops, count);
out: out:
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
...@@ -7347,6 +7492,8 @@ static int instance_mkdir(const char *name) ...@@ -7347,6 +7492,8 @@ static int instance_mkdir(const char *name)
goto out_free_tr; goto out_free_tr;
} }
ftrace_init_trace_array(tr);
init_tracer_tracefs(tr, tr->dir); init_tracer_tracefs(tr, tr->dir);
init_trace_flags_index(tr); init_trace_flags_index(tr);
__update_tracer_options(tr); __update_tracer_options(tr);
...@@ -7967,6 +8114,9 @@ __init static int tracer_alloc_buffers(void) ...@@ -7967,6 +8114,9 @@ __init static int tracer_alloc_buffers(void)
register_tracer(&nop_trace); register_tracer(&nop_trace);
/* Function tracing may start here (via kernel command line) */
init_function_trace();
/* All seems OK, enable tracing */ /* All seems OK, enable tracing */
tracing_disabled = 0; tracing_disabled = 0;
...@@ -8001,7 +8151,7 @@ __init static int tracer_alloc_buffers(void) ...@@ -8001,7 +8151,7 @@ __init static int tracer_alloc_buffers(void)
return ret; return ret;
} }
void __init trace_init(void) void __init early_trace_init(void)
{ {
if (tracepoint_printk) { if (tracepoint_printk) {
tracepoint_print_iter = tracepoint_print_iter =
...@@ -8012,6 +8162,10 @@ void __init trace_init(void) ...@@ -8012,6 +8162,10 @@ void __init trace_init(void)
static_key_enable(&tracepoint_printk_key.key); static_key_enable(&tracepoint_printk_key.key);
} }
tracer_alloc_buffers(); tracer_alloc_buffers();
}
void __init trace_init(void)
{
trace_event_init(); trace_event_init();
} }
......
...@@ -262,6 +262,9 @@ struct trace_array { ...@@ -262,6 +262,9 @@ struct trace_array {
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops *ops; struct ftrace_ops *ops;
struct trace_pid_list __rcu *function_pids; struct trace_pid_list __rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
struct list_head func_probes;
#endif
/* function tracing enabled */ /* function tracing enabled */
int function_enabled; int function_enabled;
#endif #endif
...@@ -579,6 +582,8 @@ void tracing_reset_all_online_cpus(void); ...@@ -579,6 +582,8 @@ void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp); int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void); bool tracing_is_disabled(void);
int tracer_tracing_is_on(struct trace_array *tr); int tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name, struct dentry *trace_create_file(const char *name,
umode_t mode, umode_t mode,
struct dentry *parent, struct dentry *parent,
...@@ -696,6 +701,9 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable); ...@@ -696,6 +701,9 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt; extern unsigned long ftrace_update_tot_cnt;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif #endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void); extern int DYN_FTRACE_TEST_NAME(void);
...@@ -880,6 +888,14 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags) ...@@ -880,6 +888,14 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
extern struct list_head ftrace_pids; extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
struct list_head list;
char *name;
int (*func)(struct trace_array *tr,
struct ftrace_hash *hash,
char *func, char *cmd,
char *params, int enable);
};
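
struct ftrace_func_command callbacks now receive the trace_array of the instance whose set_ftrace_filter was written, so a command can act on that instance instead of always on the top-level buffer. Registering one follows the sketch below (the command name and handler are placeholders):

static int my_cmd_func(struct trace_array *tr, struct ftrace_hash *hash,
		       char *func, char *cmd, char *param, int enable)
{
	/* act on 'tr', e.g. register a function probe against this instance */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

/* register_ftrace_command() is __init only, so call it from an __init path */
static __init int my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}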
extern bool ftrace_filter_param __initdata; extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr) static inline int ftrace_trace_task(struct trace_array *tr)
{ {
...@@ -897,6 +913,8 @@ void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); ...@@ -897,6 +913,8 @@ void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr, void ftrace_init_tracefs_toplevel(struct trace_array *tr,
struct dentry *d_tracer); struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr); void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else #else
static inline int ftrace_trace_task(struct trace_array *tr) static inline int ftrace_trace_task(struct trace_array *tr)
{ {
...@@ -916,15 +934,70 @@ static inline void ftrace_reset_array_ops(struct trace_array *tr) { } ...@@ -916,15 +934,70 @@ static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { } static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */ /* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0) #define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
struct ftrace_probe_ops {
void (*func)(unsigned long ip,
unsigned long parent_ip,
struct trace_array *tr,
struct ftrace_probe_ops *ops,
void *data);
int (*init)(struct ftrace_probe_ops *ops,
struct trace_array *tr,
unsigned long ip, void *init_data,
void **data);
void (*free)(struct ftrace_probe_ops *ops,
struct trace_array *tr,
unsigned long ip, void *data);
int (*print)(struct seq_file *m,
unsigned long ip,
struct ftrace_probe_ops *ops,
void *data);
};
struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
ftrace_mapper_func free_func);
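
The ftrace_func_mapper API above is how probes keep per-ip payloads now that the probe callbacks no longer get a writable data slot per call site: .init allocates the mapper lazily and records an ip-to-payload mapping, .func and .print look the payload up by ip, and .free tears a single mapping down (or, for ip == 0, the whole mapper). The init/free halves appear verbatim in the snapshot and traceon/traceoff probes later in this diff; the lookup side, condensed, looks like the sketch below (the callback name is a placeholder):

static void my_counted_probe(unsigned long ip, unsigned long parent_ip,
			     struct trace_array *tr, struct ftrace_probe_ops *ops,
			     void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;

	if (!mapper)
		return;		/* probe registered without a per-ip payload */

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	if (!count || *count <= 0)
		return;		/* out of shots for this call site */

	(*count)--;
	/* ... perform the probe action for 'tr' here ... */
}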
extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *ops);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
void ftrace_create_filter_files(struct ftrace_ops *ops, void ftrace_create_filter_files(struct ftrace_ops *ops,
struct dentry *parent); struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops); void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else #else
struct ftrace_func_command;
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
return -EINVAL;
}
/* /*
* The ops parameter passed in is usually undefined. * The ops parameter passed in is usually undefined.
* This must be a macro. * This must be a macro.
...@@ -989,11 +1062,13 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, ...@@ -989,11 +1062,13 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS \ # define FUNCTION_FLAGS \
C(FUNCTION, "function-trace"), C(FUNCTION, "function-trace"), \
C(FUNC_FORK, "function-fork"),
# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
#else #else
# define FUNCTION_FLAGS # define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS 0UL # define FUNCTION_DEFAULT_FLAGS 0UL
# define TRACE_ITER_FUNC_FORK 0UL
#endif #endif
#ifdef CONFIG_STACKTRACE #ifdef CONFIG_STACKTRACE
......
...@@ -153,10 +153,18 @@ static int benchmark_event_kthread(void *arg) ...@@ -153,10 +153,18 @@ static int benchmark_event_kthread(void *arg)
trace_do_benchmark(); trace_do_benchmark();
/* /*
* We don't go to sleep, but let others * We don't go to sleep, but let others run as well.
 * run as well.  * This is basically a "yield()" to let any task that
* wants to run, schedule in, but if the CPU is idle,
* we'll keep burning cycles.
*
* Note the _rcu_qs() version of cond_resched() will
* notify synchronize_rcu_tasks() that this thread has
* passed a quiescent state for rcu_tasks. Otherwise
* this thread will never voluntarily schedule which would
* block synchronize_rcu_tasks() indefinitely.
*/ */
cond_resched(); cond_resched_rcu_qs();
} }
return 0; return 0;
......
...@@ -2460,15 +2460,8 @@ struct event_probe_data { ...@@ -2460,15 +2460,8 @@ struct event_probe_data {
bool enable; bool enable;
}; };
static void static void update_event_probe(struct event_probe_data *data)
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{ {
struct event_probe_data **pdata = (struct event_probe_data **)_data;
struct event_probe_data *data = *pdata;
if (!data)
return;
if (data->enable) if (data->enable)
clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
else else
...@@ -2476,77 +2469,141 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) ...@@ -2476,77 +2469,141 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
} }
static void static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data) event_enable_probe(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
struct event_probe_data **pdata = (struct event_probe_data **)_data; struct ftrace_func_mapper *mapper = data;
struct event_probe_data *data = *pdata; struct event_probe_data *edata;
void **pdata;
if (!data) pdata = ftrace_func_mapper_find_ip(mapper, ip);
if (!pdata || !*pdata)
return;
edata = *pdata;
update_event_probe(edata);
}
static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{
struct ftrace_func_mapper *mapper = data;
struct event_probe_data *edata;
void **pdata;
pdata = ftrace_func_mapper_find_ip(mapper, ip);
if (!pdata || !*pdata)
return; return;
if (!data->count) edata = *pdata;
if (!edata->count)
return; return;
/* Skip if the event is in a state we want to switch to */ /* Skip if the event is in a state we want to switch to */
if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
return; return;
if (data->count != -1) if (edata->count != -1)
(data->count)--; (edata->count)--;
event_enable_probe(ip, parent_ip, _data); update_event_probe(edata);
} }
static int static int
event_enable_print(struct seq_file *m, unsigned long ip, event_enable_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *_data) struct ftrace_probe_ops *ops, void *data)
{ {
struct event_probe_data *data = _data; struct ftrace_func_mapper *mapper = data;
struct event_probe_data *edata;
void **pdata;
pdata = ftrace_func_mapper_find_ip(mapper, ip);
if (WARN_ON_ONCE(!pdata || !*pdata))
return 0;
edata = *pdata;
seq_printf(m, "%ps:", (void *)ip); seq_printf(m, "%ps:", (void *)ip);
seq_printf(m, "%s:%s:%s", seq_printf(m, "%s:%s:%s",
data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
data->file->event_call->class->system, edata->file->event_call->class->system,
trace_event_name(data->file->event_call)); trace_event_name(edata->file->event_call));
if (data->count == -1) if (edata->count == -1)
seq_puts(m, ":unlimited\n"); seq_puts(m, ":unlimited\n");
else else
seq_printf(m, ":count=%ld\n", data->count); seq_printf(m, ":count=%ld\n", edata->count);
return 0; return 0;
} }
static int static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
void **_data) unsigned long ip, void *init_data, void **data)
{ {
struct event_probe_data **pdata = (struct event_probe_data **)_data; struct ftrace_func_mapper *mapper = *data;
struct event_probe_data *data = *pdata; struct event_probe_data *edata = init_data;
int ret;
if (!mapper) {
mapper = allocate_ftrace_func_mapper();
if (!mapper)
return -ENODEV;
*data = mapper;
}
data->ref++; ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
if (ret < 0)
return ret;
edata->ref++;
return 0;
}
static int free_probe_data(void *data)
{
struct event_probe_data *edata = data;
edata->ref--;
if (!edata->ref) {
/* Remove the SOFT_MODE flag */
__ftrace_event_enable_disable(edata->file, 0, 1);
module_put(edata->file->event_call->mod);
kfree(edata);
}
return 0; return 0;
} }
static void static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
void **_data) unsigned long ip, void *data)
{ {
struct event_probe_data **pdata = (struct event_probe_data **)_data; struct ftrace_func_mapper *mapper = data;
struct event_probe_data *data = *pdata; struct event_probe_data *edata;
if (WARN_ON_ONCE(data->ref <= 0)) if (!ip) {
if (!mapper)
return;
free_ftrace_func_mapper(mapper, free_probe_data);
return; return;
data->ref--;
if (!data->ref) {
/* Remove the SOFT_MODE flag */
__ftrace_event_enable_disable(data->file, 0, 1);
module_put(data->file->event_call->mod);
kfree(data);
} }
*pdata = NULL;
edata = ftrace_func_mapper_remove_ip(mapper, ip);
if (WARN_ON_ONCE(!edata))
return;
if (WARN_ON_ONCE(edata->ref <= 0))
return;
free_probe_data(edata);
} }
static struct ftrace_probe_ops event_enable_probe_ops = { static struct ftrace_probe_ops event_enable_probe_ops = {
...@@ -2578,10 +2635,9 @@ static struct ftrace_probe_ops event_disable_count_probe_ops = { ...@@ -2578,10 +2635,9 @@ static struct ftrace_probe_ops event_disable_count_probe_ops = {
}; };
static int static int
event_enable_func(struct ftrace_hash *hash, event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enabled) char *glob, char *cmd, char *param, int enabled)
{ {
struct trace_array *tr = top_trace_array();
struct trace_event_file *file; struct trace_event_file *file;
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
struct event_probe_data *data; struct event_probe_data *data;
...@@ -2619,12 +2675,12 @@ event_enable_func(struct ftrace_hash *hash, ...@@ -2619,12 +2675,12 @@ event_enable_func(struct ftrace_hash *hash,
ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
if (glob[0] == '!') { if (glob[0] == '!') {
unregister_ftrace_function_probe_func(glob+1, ops); ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
ret = 0;
goto out; goto out;
} }
ret = -ENOMEM; ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL); data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) if (!data)
goto out; goto out;
...@@ -2661,7 +2717,8 @@ event_enable_func(struct ftrace_hash *hash, ...@@ -2661,7 +2717,8 @@ event_enable_func(struct ftrace_hash *hash,
ret = __ftrace_event_enable_disable(file, 1, 1); ret = __ftrace_event_enable_disable(file, 1, 1);
if (ret < 0) if (ret < 0)
goto out_put; goto out_put;
ret = register_ftrace_function_probe(glob, ops, data);
ret = register_ftrace_function_probe(glob, tr, ops, data);
/* /*
* The above returns on success the # of functions enabled, * The above returns on success the # of functions enabled,
* but if it didn't find any functions it returns zero. * but if it didn't find any functions it returns zero.
......
...@@ -267,10 +267,14 @@ static struct tracer function_trace __tracer_data = ...@@ -267,10 +267,14 @@ static struct tracer function_trace __tracer_data =
}; };
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(void **data, bool on) static void update_traceon_count(struct ftrace_probe_ops *ops,
unsigned long ip,
struct trace_array *tr, bool on,
void *data)
{ {
long *count = (long *)data; struct ftrace_func_mapper *mapper = data;
long old_count = *count; long *count;
long old_count;
/* /*
* Tracing gets disabled (or enabled) once per count. * Tracing gets disabled (or enabled) once per count.
...@@ -301,23 +305,22 @@ static void update_traceon_count(void **data, bool on) ...@@ -301,23 +305,22 @@ static void update_traceon_count(void **data, bool on)
* setting the tracing_on file. But we currently don't care * setting the tracing_on file. But we currently don't care
* about that. * about that.
*/ */
if (!old_count) count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
old_count = *count;
if (old_count <= 0)
return; return;
/* Make sure we see count before checking tracing state */ /* Make sure we see count before checking tracing state */
smp_rmb(); smp_rmb();
if (on == !!tracing_is_on()) if (on == !!tracer_tracing_is_on(tr))
return; return;
if (on) if (on)
tracing_on(); tracer_tracing_on(tr);
else else
tracing_off(); tracer_tracing_off(tr);
/* unlimited? */
if (old_count == -1)
return;
/* Make sure tracing state is visible before updating count */ /* Make sure tracing state is visible before updating count */
smp_wmb(); smp_wmb();
...@@ -326,33 +329,41 @@ static void update_traceon_count(void **data, bool on) ...@@ -326,33 +329,41 @@ static void update_traceon_count(void **data, bool on)
} }
static void static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
update_traceon_count(data, 1); update_traceon_count(ops, ip, tr, 1, data);
} }
static void static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
update_traceon_count(data, 0); update_traceon_count(ops, ip, tr, 0, data);
} }
static void static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceon(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
if (tracing_is_on()) if (tracer_tracing_is_on(tr))
return; return;
tracing_on(); tracer_tracing_on(tr);
} }
static void static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
if (!tracing_is_on()) if (!tracer_tracing_is_on(tr))
return; return;
tracing_off(); tracer_tracing_off(tr);
} }
/* /*
...@@ -364,144 +375,218 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) ...@@ -364,144 +375,218 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
*/ */
#define STACK_SKIP 4 #define STACK_SKIP 4
static __always_inline void trace_stack(struct trace_array *tr)
{
unsigned long flags;
int pc;
local_save_flags(flags);
pc = preempt_count();
__trace_stack(tr, flags, STACK_SKIP, pc);
}
static void static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
trace_dump_stack(STACK_SKIP); trace_stack(tr);
} }
static void static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
long *count = (long *)data; struct ftrace_func_mapper *mapper = data;
long *count;
long old_count; long old_count;
long new_count; long new_count;
if (!tracing_is_on())
return;
/* unlimited? */
if (!mapper) {
trace_stack(tr);
return;
}
count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
/* /*
* Stack traces should only execute the number of times the * Stack traces should only execute the number of times the
* user specified in the counter. * user specified in the counter.
*/ */
do { do {
if (!tracing_is_on())
return;
old_count = *count; old_count = *count;
if (!old_count) if (!old_count)
return; return;
/* unlimited? */
if (old_count == -1) {
trace_dump_stack(STACK_SKIP);
return;
}
new_count = old_count - 1; new_count = old_count - 1;
new_count = cmpxchg(count, old_count, new_count); new_count = cmpxchg(count, old_count, new_count);
if (new_count == old_count) if (new_count == old_count)
trace_dump_stack(STACK_SKIP); trace_stack(tr);
if (!tracing_is_on())
return;
} while (new_count != old_count); } while (new_count != old_count);
} }
static int update_count(void **data) static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
void *data)
{ {
unsigned long *count = (long *)data; struct ftrace_func_mapper *mapper = data;
long *count = NULL;
if (!*count) if (mapper)
return 0; count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
if (*count != -1) if (count) {
if (*count <= 0)
return 0;
(*count)--; (*count)--;
}
return 1; return 1;
} }
static void static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data) ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
if (update_count(data)) if (update_count(ops, ip, data))
ftrace_dump(DUMP_ALL); ftrace_dump(DUMP_ALL);
} }
/* Only dump the current CPU buffer. */ /* Only dump the current CPU buffer. */
static void static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data) ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr, struct ftrace_probe_ops *ops,
void *data)
{ {
if (update_count(data)) if (update_count(ops, ip, data))
ftrace_dump(DUMP_ORIG); ftrace_dump(DUMP_ORIG);
} }
static int static int
ftrace_probe_print(const char *name, struct seq_file *m, ftrace_probe_print(const char *name, struct seq_file *m,
unsigned long ip, void *data) unsigned long ip, struct ftrace_probe_ops *ops,
void *data)
{ {
long count = (long)data; struct ftrace_func_mapper *mapper = data;
long *count = NULL;
seq_printf(m, "%ps:%s", (void *)ip, name); seq_printf(m, "%ps:%s", (void *)ip, name);
if (count == -1) if (mapper)
seq_puts(m, ":unlimited\n"); count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
if (count)
seq_printf(m, ":count=%ld\n", *count);
else else
seq_printf(m, ":count=%ld\n", count); seq_puts(m, ":unlimited\n");
return 0; return 0;
} }
static int static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip, ftrace_traceon_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *ops,
void *data)
{ {
return ftrace_probe_print("traceon", m, ip, data); return ftrace_probe_print("traceon", m, ip, ops, data);
} }
static int static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip, ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *ops, void *data)
{ {
return ftrace_probe_print("traceoff", m, ip, data); return ftrace_probe_print("traceoff", m, ip, ops, data);
} }
static int static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip, ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *ops, void *data)
{ {
return ftrace_probe_print("stacktrace", m, ip, data); return ftrace_probe_print("stacktrace", m, ip, ops, data);
} }
static int static int
ftrace_dump_print(struct seq_file *m, unsigned long ip, ftrace_dump_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *ops, void *data)
{ {
return ftrace_probe_print("dump", m, ip, data); return ftrace_probe_print("dump", m, ip, ops, data);
} }
static int static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip, ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *ops, void *data)
{ {
return ftrace_probe_print("cpudump", m, ip, data); return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
unsigned long ip, void *init_data, void **data)
{
struct ftrace_func_mapper *mapper = *data;
if (!mapper) {
mapper = allocate_ftrace_func_mapper();
if (!mapper)
return -ENOMEM;
*data = mapper;
}
return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
unsigned long ip, void *data)
{
struct ftrace_func_mapper *mapper = data;
if (!ip) {
free_ftrace_func_mapper(mapper, NULL);
return;
}
ftrace_func_mapper_remove_ip(mapper, ip);
} }
static struct ftrace_probe_ops traceon_count_probe_ops = { static struct ftrace_probe_ops traceon_count_probe_ops = {
.func = ftrace_traceon_count, .func = ftrace_traceon_count,
.print = ftrace_traceon_print, .print = ftrace_traceon_print,
.init = ftrace_count_init,
.free = ftrace_count_free,
}; };
static struct ftrace_probe_ops traceoff_count_probe_ops = { static struct ftrace_probe_ops traceoff_count_probe_ops = {
.func = ftrace_traceoff_count, .func = ftrace_traceoff_count,
.print = ftrace_traceoff_print, .print = ftrace_traceoff_print,
.init = ftrace_count_init,
.free = ftrace_count_free,
}; };
static struct ftrace_probe_ops stacktrace_count_probe_ops = { static struct ftrace_probe_ops stacktrace_count_probe_ops = {
.func = ftrace_stacktrace_count, .func = ftrace_stacktrace_count,
.print = ftrace_stacktrace_print, .print = ftrace_stacktrace_print,
.init = ftrace_count_init,
.free = ftrace_count_free,
}; };
static struct ftrace_probe_ops dump_probe_ops = { static struct ftrace_probe_ops dump_probe_ops = {
.func = ftrace_dump_probe, .func = ftrace_dump_probe,
.print = ftrace_dump_print, .print = ftrace_dump_print,
.init = ftrace_count_init,
.free = ftrace_count_free,
}; };
static struct ftrace_probe_ops cpudump_probe_ops = { static struct ftrace_probe_ops cpudump_probe_ops = {
...@@ -525,7 +610,8 @@ static struct ftrace_probe_ops stacktrace_probe_ops = { ...@@ -525,7 +610,8 @@ static struct ftrace_probe_ops stacktrace_probe_ops = {
}; };
static int static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops, ftrace_trace_probe_callback(struct trace_array *tr,
struct ftrace_probe_ops *ops,
struct ftrace_hash *hash, char *glob, struct ftrace_hash *hash, char *glob,
char *cmd, char *param, int enable) char *cmd, char *param, int enable)
{ {
...@@ -537,10 +623,8 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops, ...@@ -537,10 +623,8 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
if (!enable) if (!enable)
return -EINVAL; return -EINVAL;
if (glob[0] == '!') { if (glob[0] == '!')
unregister_ftrace_function_probe_func(glob+1, ops); return unregister_ftrace_function_probe_func(glob+1, tr, ops);
return 0;
}
if (!param) if (!param)
goto out_reg; goto out_reg;
...@@ -559,13 +643,13 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops, ...@@ -559,13 +643,13 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
return ret; return ret;
out_reg: out_reg:
ret = register_ftrace_function_probe(glob, ops, count); ret = register_ftrace_function_probe(glob, tr, ops, count);
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
} }
static int static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash, ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable) char *glob, char *cmd, char *param, int enable)
{ {
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
...@@ -576,24 +660,24 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash, ...@@ -576,24 +660,24 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
else else
ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops; ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
return ftrace_trace_probe_callback(ops, hash, glob, cmd, return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
param, enable); param, enable);
} }
static int static int
ftrace_stacktrace_callback(struct ftrace_hash *hash, ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable) char *glob, char *cmd, char *param, int enable)
{ {
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops; ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
return ftrace_trace_probe_callback(ops, hash, glob, cmd, return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
param, enable); param, enable);
} }
static int static int
ftrace_dump_callback(struct ftrace_hash *hash, ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable) char *glob, char *cmd, char *param, int enable)
{ {
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
...@@ -601,12 +685,12 @@ ftrace_dump_callback(struct ftrace_hash *hash, ...@@ -601,12 +685,12 @@ ftrace_dump_callback(struct ftrace_hash *hash,
ops = &dump_probe_ops; ops = &dump_probe_ops;
/* Only dump once. */ /* Only dump once. */
return ftrace_trace_probe_callback(ops, hash, glob, cmd, return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
"1", enable); "1", enable);
} }
static int static int
ftrace_cpudump_callback(struct ftrace_hash *hash, ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable) char *glob, char *cmd, char *param, int enable)
{ {
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
...@@ -614,7 +698,7 @@ ftrace_cpudump_callback(struct ftrace_hash *hash, ...@@ -614,7 +698,7 @@ ftrace_cpudump_callback(struct ftrace_hash *hash,
ops = &cpudump_probe_ops; ops = &cpudump_probe_ops;
/* Only dump once. */ /* Only dump once. */
return ftrace_trace_probe_callback(ops, hash, glob, cmd, return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
"1", enable); "1", enable);
} }
...@@ -687,9 +771,8 @@ static inline int init_func_cmd_traceon(void) ...@@ -687,9 +771,8 @@ static inline int init_func_cmd_traceon(void)
} }
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
static __init int init_function_trace(void) __init int init_function_trace(void)
{ {
init_func_cmd_traceon(); init_func_cmd_traceon();
return register_tracer(&function_trace); return register_tracer(&function_trace);
} }
core_initcall(init_function_trace);
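The trace_functions.c changes above hand the owning trace_array down to the probe callbacks and the register/unregister paths, which is what lets the traceon/traceoff, stacktrace, dump and cpudump triggers be set per tracing instance. A minimal usage sketch, assuming tracefs is mounted at /sys/kernel/debug/tracing (the paths and the "foo" instance name are illustrative, not part of this diff):

    cd /sys/kernel/debug/tracing
    mkdir instances/foo
    # stacktrace on the first 5 hits of schedule(), only in this instance
    echo 'schedule:stacktrace:5' > instances/foo/set_ftrace_filter
    # the listing shows something like schedule:stacktrace:count=5
    cat instances/foo/set_ftrace_filter
    # remove the probe again
    echo '!schedule:stacktrace' >> instances/foo/set_ftrace_filter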
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include "trace_probe.h" #include "trace_probe.h"
#define KPROBE_EVENT_SYSTEM "kprobes" #define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
/** /**
* Kprobe event core functions * Kprobe event core functions
...@@ -282,6 +283,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, ...@@ -282,6 +283,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
void *addr, void *addr,
const char *symbol, const char *symbol,
unsigned long offs, unsigned long offs,
int maxactive,
int nargs, bool is_return) int nargs, bool is_return)
{ {
struct trace_kprobe *tk; struct trace_kprobe *tk;
...@@ -309,6 +311,8 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, ...@@ -309,6 +311,8 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
else else
tk->rp.kp.pre_handler = kprobe_dispatcher; tk->rp.kp.pre_handler = kprobe_dispatcher;
tk->rp.maxactive = maxactive;
if (!event || !is_good_name(event)) { if (!event || !is_good_name(event)) {
ret = -EINVAL; ret = -EINVAL;
goto error; goto error;
...@@ -598,8 +602,10 @@ static int create_trace_kprobe(int argc, char **argv) ...@@ -598,8 +602,10 @@ static int create_trace_kprobe(int argc, char **argv)
{ {
/* /*
* Argument syntax: * Argument syntax:
* - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS] * - Add kprobe:
* - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS] * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
* - Add kretprobe:
* r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
* Fetch args: * Fetch args:
* $retval : fetch return value * $retval : fetch return value
* $stack : fetch stack address * $stack : fetch stack address
...@@ -619,6 +625,7 @@ static int create_trace_kprobe(int argc, char **argv) ...@@ -619,6 +625,7 @@ static int create_trace_kprobe(int argc, char **argv)
int i, ret = 0; int i, ret = 0;
bool is_return = false, is_delete = false; bool is_return = false, is_delete = false;
char *symbol = NULL, *event = NULL, *group = NULL; char *symbol = NULL, *event = NULL, *group = NULL;
int maxactive = 0;
char *arg; char *arg;
unsigned long offset = 0; unsigned long offset = 0;
void *addr = NULL; void *addr = NULL;
...@@ -637,8 +644,28 @@ static int create_trace_kprobe(int argc, char **argv) ...@@ -637,8 +644,28 @@ static int create_trace_kprobe(int argc, char **argv)
return -EINVAL; return -EINVAL;
} }
if (argv[0][1] == ':') { event = strchr(&argv[0][1], ':');
event = &argv[0][2]; if (event) {
event[0] = '\0';
event++;
}
if (is_return && isdigit(argv[0][1])) {
ret = kstrtouint(&argv[0][1], 0, &maxactive);
if (ret) {
pr_info("Failed to parse maxactive.\n");
return ret;
}
/* kretprobes instances are iterated over via a list. The
* maximum should stay reasonable.
*/
if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
pr_info("Maxactive is too big (%d > %d).\n",
maxactive, KRETPROBE_MAXACTIVE_MAX);
return -E2BIG;
}
}
if (event) {
if (strchr(event, '/')) { if (strchr(event, '/')) {
group = event; group = event;
event = strchr(group, '/') + 1; event = strchr(group, '/') + 1;
...@@ -715,8 +742,8 @@ static int create_trace_kprobe(int argc, char **argv) ...@@ -715,8 +742,8 @@ static int create_trace_kprobe(int argc, char **argv)
is_return ? 'r' : 'p', addr); is_return ? 'r' : 'p', addr);
event = buf; event = buf;
} }
tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc, tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
is_return); argc, is_return);
if (IS_ERR(tk)) { if (IS_ERR(tk)) {
pr_info("Failed to allocate trace_probe.(%d)\n", pr_info("Failed to allocate trace_probe.(%d)\n",
(int)PTR_ERR(tk)); (int)PTR_ERR(tk));
......
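The parsing added above lets a kretprobe definition carry an optional maxactive count right after the 'r', capped at KRETPROBE_MAXACTIVE_MAX. A minimal sketch, assuming tracefs at /sys/kernel/debug/tracing (the event name "myretprobe" is illustrative):

    cd /sys/kernel/debug/tracing
    # kretprobe on inet_csk_accept with up to 10 concurrently tracked instances
    echo 'r10:myretprobe inet_csk_accept $retval' >> kprobe_events
    cat kprobe_events
    # remove the probe again
    echo '-:myretprobe' >> kprobe_events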
...@@ -35,7 +35,7 @@ unsigned long stack_trace_max_size; ...@@ -35,7 +35,7 @@ unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock = arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static DEFINE_PER_CPU(int, trace_active); DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex); static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled; int stack_tracer_enabled;
...@@ -96,6 +96,14 @@ check_stack(unsigned long ip, unsigned long *stack) ...@@ -96,6 +96,14 @@ check_stack(unsigned long ip, unsigned long *stack)
if (in_nmi()) if (in_nmi())
return; return;
/*
* There's a slight chance that we are tracing inside the
* RCU infrastructure, and rcu_irq_enter() will not work
* as expected.
*/
if (unlikely(rcu_irq_enter_disabled()))
return;
local_irq_save(flags); local_irq_save(flags);
arch_spin_lock(&stack_trace_max_lock); arch_spin_lock(&stack_trace_max_lock);
...@@ -207,13 +215,12 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -207,13 +215,12 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs) struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
unsigned long stack; unsigned long stack;
int cpu;
preempt_disable_notrace(); preempt_disable_notrace();
cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */ /* no atomic needed, we only modify this variable by this cpu */
if (per_cpu(trace_active, cpu)++ != 0) __this_cpu_inc(disable_stack_tracer);
if (__this_cpu_read(disable_stack_tracer) != 1)
goto out; goto out;
ip += MCOUNT_INSN_SIZE; ip += MCOUNT_INSN_SIZE;
...@@ -221,7 +228,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -221,7 +228,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
check_stack(ip, &stack); check_stack(ip, &stack);
out: out:
per_cpu(trace_active, cpu)--; __this_cpu_dec(disable_stack_tracer);
/* prevent recursion in schedule */ /* prevent recursion in schedule */
preempt_enable_notrace(); preempt_enable_notrace();
} }
...@@ -253,7 +260,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, ...@@ -253,7 +260,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
long *ptr = filp->private_data; long *ptr = filp->private_data;
unsigned long val, flags; unsigned long val, flags;
int ret; int ret;
int cpu;
ret = kstrtoul_from_user(ubuf, count, 10, &val); ret = kstrtoul_from_user(ubuf, count, 10, &val);
if (ret) if (ret)
...@@ -264,16 +270,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, ...@@ -264,16 +270,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
/* /*
* In case we trace inside arch_spin_lock() or after (NMI), * In case we trace inside arch_spin_lock() or after (NMI),
* we will cause circular lock, so we also need to increase * we will cause circular lock, so we also need to increase
* the percpu trace_active here. * the percpu disable_stack_tracer here.
*/ */
cpu = smp_processor_id(); __this_cpu_inc(disable_stack_tracer);
per_cpu(trace_active, cpu)++;
arch_spin_lock(&stack_trace_max_lock); arch_spin_lock(&stack_trace_max_lock);
*ptr = val; *ptr = val;
arch_spin_unlock(&stack_trace_max_lock); arch_spin_unlock(&stack_trace_max_lock);
per_cpu(trace_active, cpu)--; __this_cpu_dec(disable_stack_tracer);
local_irq_restore(flags); local_irq_restore(flags);
return count; return count;
...@@ -307,12 +312,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) ...@@ -307,12 +312,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos) static void *t_start(struct seq_file *m, loff_t *pos)
{ {
int cpu;
local_irq_disable(); local_irq_disable();
cpu = smp_processor_id(); __this_cpu_inc(disable_stack_tracer);
per_cpu(trace_active, cpu)++;
arch_spin_lock(&stack_trace_max_lock); arch_spin_lock(&stack_trace_max_lock);
...@@ -324,12 +326,9 @@ static void *t_start(struct seq_file *m, loff_t *pos) ...@@ -324,12 +326,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void t_stop(struct seq_file *m, void *p) static void t_stop(struct seq_file *m, void *p)
{ {
int cpu;
arch_spin_unlock(&stack_trace_max_lock); arch_spin_unlock(&stack_trace_max_lock);
cpu = smp_processor_id(); __this_cpu_dec(disable_stack_tracer);
per_cpu(trace_active, cpu)--;
local_irq_enable(); local_irq_enable();
} }
......
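The stack tracer rework above only changes how recursion is blocked (a per-cpu disable_stack_tracer counter instead of the trace_active bookkeeping); its user interface is unchanged. For orientation, a hedged sketch of the usual knobs (paths assumed, not part of this diff):

    # enable the stack tracer and inspect the deepest stack recorded so far
    echo 1 > /proc/sys/kernel/stack_tracer_enabled
    cat /sys/kernel/debug/tracing/stack_max_size
    cat /sys/kernel/debug/tracing/stack_trace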
...@@ -65,6 +65,7 @@ ...@@ -65,6 +65,7 @@
#include <linux/page_owner.h> #include <linux/page_owner.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/memcontrol.h> #include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
......
...@@ -412,6 +412,7 @@ static int ...@@ -412,6 +412,7 @@ static int
is_mcounted_section_name(char const *const txtname) is_mcounted_section_name(char const *const txtname)
{ {
return strcmp(".text", txtname) == 0 || return strcmp(".text", txtname) == 0 ||
strcmp(".init.text", txtname) == 0 ||
strcmp(".ref.text", txtname) == 0 || strcmp(".ref.text", txtname) == 0 ||
strcmp(".sched.text", txtname) == 0 || strcmp(".sched.text", txtname) == 0 ||
strcmp(".spinlock.text", txtname) == 0 || strcmp(".spinlock.text", txtname) == 0 ||
......
...@@ -130,6 +130,7 @@ if ($inputfile =~ m,kernel/trace/ftrace\.o$,) { ...@@ -130,6 +130,7 @@ if ($inputfile =~ m,kernel/trace/ftrace\.o$,) {
# Acceptable sections to record. # Acceptable sections to record.
my %text_sections = ( my %text_sections = (
".text" => 1, ".text" => 1,
".init.text" => 1,
".ref.text" => 1, ".ref.text" => 1,
".sched.text" => 1, ".sched.text" => 1,
".spinlock.text" => 1, ".spinlock.text" => 1,
......
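Both recordmcount implementations now accept .init.text, so mcount call sites in built-in __init code get ftrace records as well. As a hedged illustration (these are long-standing ftrace boot parameters, not something introduced by this diff), the function tracer can then be started early enough from the kernel command line to catch those init functions:

    # illustrative kernel command line additions
    ftrace=function ftrace_filter=*_init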
...@@ -16,6 +16,7 @@ echo " -k|--keep Keep passed test logs" ...@@ -16,6 +16,7 @@ echo " -k|--keep Keep passed test logs"
echo " -v|--verbose Increase verbosity of test messages" echo " -v|--verbose Increase verbosity of test messages"
echo " -vv Alias of -v -v (Show all results in stdout)" echo " -vv Alias of -v -v (Show all results in stdout)"
echo " -d|--debug Debug mode (trace all shell commands)" echo " -d|--debug Debug mode (trace all shell commands)"
echo " -l|--logdir <dir> Save logs on the <dir>"
exit $1 exit $1
} }
...@@ -64,6 +65,10 @@ parse_opts() { # opts ...@@ -64,6 +65,10 @@ parse_opts() { # opts
DEBUG=1 DEBUG=1
shift 1 shift 1
;; ;;
--logdir|-l)
LOG_DIR=$2
shift 2
;;
*.tc) *.tc)
if [ -f "$1" ]; then if [ -f "$1" ]; then
OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`" OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`"
...@@ -145,11 +150,16 @@ XFAILED_CASES= ...@@ -145,11 +150,16 @@ XFAILED_CASES=
UNDEFINED_CASES= UNDEFINED_CASES=
TOTAL_RESULT=0 TOTAL_RESULT=0
INSTANCE=
CASENO=0 CASENO=0
testcase() { # testfile testcase() { # testfile
CASENO=$((CASENO+1)) CASENO=$((CASENO+1))
desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:` desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
prlog -n "[$CASENO]$desc" prlog -n "[$CASENO]$INSTANCE$desc"
}
test_on_instance() { # testfile
grep -q "^#[ \t]*flags:.*instance" $1
} }
eval_result() { # sigval eval_result() { # sigval
...@@ -266,6 +276,17 @@ for t in $TEST_CASES; do ...@@ -266,6 +276,17 @@ for t in $TEST_CASES; do
run_test $t run_test $t
done done
# Test on instance loop
INSTANCE=" (instance) "
for t in $TEST_CASES; do
test_on_instance $t || continue
SAVED_TRACING_DIR=$TRACING_DIR
export TRACING_DIR=`mktemp -d $TRACING_DIR/instances/ftracetest.XXXXXX`
run_test $t
rmdir $TRACING_DIR
TRACING_DIR=$SAVED_TRACING_DIR
done
prlog "" prlog ""
prlog "# of passed: " `echo $PASSED_CASES | wc -w` prlog "# of passed: " `echo $PASSED_CASES | wc -w`
prlog "# of failed: " `echo $FAILED_CASES | wc -w` prlog "# of failed: " `echo $FAILED_CASES | wc -w`
......
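With the new -l/--logdir option and the instance loop above, ftracetest keeps its logs in the requested directory and re-runs every test whose header carries "# flags: instance" inside a freshly created tracing instance. A hedged invocation sketch (paths assumed):

    cd tools/testing/selftests/ftrace
    ./ftracetest -l /tmp/ftracetest-logs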
#!/bin/sh #!/bin/sh
# description: Basic test for tracers # description: Basic test for tracers
# flags: instance
test -f available_tracers test -f available_tracers
for t in `cat available_tracers`; do for t in `cat available_tracers`; do
echo $t > current_tracer echo $t > current_tracer
......
#!/bin/sh #!/bin/sh
# description: Basic trace clock test # description: Basic trace clock test
# flags: instance
test -f trace_clock test -f trace_clock
for c in `cat trace_clock | tr -d \[\]`; do for c in `cat trace_clock | tr -d \[\]`; do
echo $c > trace_clock echo $c > trace_clock
......
#!/bin/sh #!/bin/sh
# description: event tracing - enable/disable with event level files # description: event tracing - enable/disable with event level files
# flags: instance
do_reset() { do_reset() {
echo > set_event echo > set_event
......
#!/bin/sh #!/bin/sh
# description: event tracing - restricts events based on pid # description: event tracing - restricts events based on pid
# flags: instance
do_reset() { do_reset() {
echo > set_event echo > set_event
......
#!/bin/sh #!/bin/sh
# description: event tracing - enable/disable with subsystem level files # description: event tracing - enable/disable with subsystem level files
# flags: instance
do_reset() { do_reset() {
echo > set_event echo > set_event
......
#!/bin/sh
# description: ftrace - test for function event triggers
# flags: instance
#
# Ftrace allows adding triggers to functions, such as enabling or disabling
# tracing, enabling or disabling trace events, or recording a stack trace
# within the ring buffer.
#
# This test is designed to test event triggers
#
# The triggers are set within the set_ftrace_filter file
if [ ! -f set_ftrace_filter ]; then
echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
exit_unsupported
fi
do_reset() {
reset_ftrace_filter
reset_tracer
disable_events
clear_trace
enable_tracing
}
fail() { # mesg
do_reset
echo $1
exit $FAIL
}
SLEEP_TIME=".1"
do_reset
echo "Testing function probes with events:"
EVENT="sched:sched_switch"
EVENT_ENABLE="events/sched/sched_switch/enable"
cnt_trace() {
grep -v '^#' trace | wc -l
}
test_event_enabled() {
val=$1
e=`cat $EVENT_ENABLE`
if [ "$e" != $val ]; then
echo "Expected $val but found $e"
exit -1
fi
}
run_enable_disable() {
enable=$1 # enable
Enable=$2 # Enable
check_disable=$3 # 0
check_enable_star=$4 # 1*
check_disable_star=$5 # 0*
cnt=`cnt_trace`
if [ $cnt -ne 0 ]; then
fail "Found junk in trace file"
fi
echo "$Enable event all the time"
echo $check_disable > $EVENT_ENABLE
sleep $SLEEP_TIME
test_event_enabled $check_disable
echo "schedule:${enable}_event:$EVENT" > set_ftrace_filter
echo " make sure it works 5 times"
for i in `seq 5`; do
sleep $SLEEP_TIME
echo " test $i"
test_event_enabled $check_enable_star
echo $check_disable > $EVENT_ENABLE
done
sleep $SLEEP_TIME
echo " make sure it's still works"
test_event_enabled $check_enable_star
reset_ftrace_filter
echo " make sure it only works 3 times"
echo $check_disable > $EVENT_ENABLE
sleep $SLEEP_TIME
echo "schedule:${enable}_event:$EVENT:3" > set_ftrace_filter
for i in `seq 3`; do
sleep $SLEEP_TIME
echo " test $i"
test_event_enabled $check_enable_star
echo $check_disable > $EVENT_ENABLE
done
sleep $SLEEP_TIME
echo " make sure it stop working"
test_event_enabled $check_disable_star
do_reset
}
run_enable_disable enable Enable 0 "1*" "0*"
run_enable_disable disable Disable 1 "0*" "1*"
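Written out by hand, the function event triggers that the test above drives through run_enable_disable look like this (a sketch, assuming a tracefs cwd; the event and count are the ones the test itself uses):

    # enable sched_switch whenever schedule() is hit, indefinitely
    echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter
    # remove it again
    echo '!schedule:enable_event:sched:sched_switch' >> set_ftrace_filter
    # the counted form only fires on the first three hits
    echo 'schedule:disable_event:sched:sched_switch:3' > set_ftrace_filter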
#!/bin/sh
# description: ftrace - test reading of set_ftrace_filter
#
# The set_ftrace_filter file of ftrace is used to list functions as well as
# triggers (probes) attached to functions. The code to read this file is not
# straight forward and has had various bugs in the past. This test is designed
# to add functions and triggers to that file in various ways and read that
# file in various ways (cat vs dd).
#
# The triggers are set within the set_ftrace_filter file
if [ ! -f set_ftrace_filter ]; then
echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
exit_unsupported
fi
do_reset() {
reset_tracer
reset_ftrace_filter
disable_events
clear_trace
enable_tracing
}
fail() { # mesg
do_reset
echo $1
exit $FAIL
}
do_reset
FILTER=set_ftrace_filter
FUNC1="schedule"
FUNC2="do_IRQ"
ALL_FUNCS="#### all functions enabled ####"
test_func() {
if ! echo "$1" | grep -q "^$2\$"; then
return 0
fi
echo "$1" | grep -v "^$2\$"
return 1
}
check_set_ftrace_filter() {
cat=`cat $FILTER`
dd1=`dd if=$FILTER bs=1 | grep -v -e 'records in' -e 'records out' -e 'bytes copied'`
dd100=`dd if=$FILTER bs=100 | grep -v -e 'records in' -e 'records out' -e 'bytes copied'`
echo "Testing '$@'"
while [ $# -gt 0 ]; do
echo "test $1"
if cat=`test_func "$cat" "$1"`; then
return 0
fi
if dd1=`test_func "$dd1" "$1"`; then
return 0
fi
if dd100=`test_func "$dd100" "$1"`; then
return 0
fi
shift
done
if [ -n "$cat" ]; then
return 0
fi
if [ -n "$dd1" ]; then
return 0
fi
if [ -n "$dd100" ]; then
return 0
fi
return 1;
}
if check_set_ftrace_filter "$ALL_FUNCS"; then
fail "Expected only $ALL_FUNCS"
fi
echo "$FUNC1:traceoff" > set_ftrace_filter
if check_set_ftrace_filter "$ALL_FUNCS" "$FUNC1:traceoff:unlimited"; then
fail "Expected $ALL_FUNCS and $FUNC1:traceoff:unlimited"
fi
echo "$FUNC1" > set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC1:traceoff:unlimited"; then
fail "Expected $FUNC1 and $FUNC1:traceoff:unlimited"
fi
echo "$FUNC2" >> set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC2" "$FUNC1:traceoff:unlimited"; then
fail "Expected $FUNC1 $FUNC2 and $FUNC1:traceoff:unlimited"
fi
echo "$FUNC2:traceoff" >> set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC2" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then
fail "Expected $FUNC1 $FUNC2 $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited"
fi
echo "$FUNC1" > set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then
fail "Expected $FUNC1 $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited"
fi
echo > set_ftrace_filter
if check_set_ftrace_filter "$ALL_FUNCS" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then
fail "Expected $ALL_FUNCS $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited"
fi
reset_ftrace_filter
if check_set_ftrace_filter "$ALL_FUNCS"; then
fail "Expected $ALL_FUNCS"
fi
echo "$FUNC1" > set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" ; then
fail "Expected $FUNC1"
fi
echo "$FUNC2" >> set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC2" ; then
fail "Expected $FUNC1 and $FUNC2"
fi
do_reset
exit 0
#!/bin/sh
# description: ftrace - test for function traceon/off triggers
# flags: instance
#
# Ftrace allows adding triggers to functions, such as enabling or disabling
# tracing, enabling or disabling trace events, or recording a stack trace
# within the ring buffer.
#
# This test is designed to test enabling and disabling tracing triggers
#
# The triggers are set within the set_ftrace_filter file
if [ ! -f set_ftrace_filter ]; then
echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
exit_unsupported
fi
do_reset() {
reset_ftrace_filter
reset_tracer
disable_events
clear_trace
enable_tracing
}
fail() { # mesg
do_reset
echo $1
exit $FAIL
}
SLEEP_TIME=".1"
do_reset
echo "Testing function probes with enabling disabling tracing:"
cnt_trace() {
grep -v '^#' trace | wc -l
}
echo '** DISABLE TRACING'
disable_tracing
clear_trace
cnt=`cnt_trace`
if [ $cnt -ne 0 ]; then
fail "Found junk in trace"
fi
echo '** ENABLE EVENTS'
echo 1 > events/enable
echo '** ENABLE TRACING'
enable_tracing
cnt=`cnt_trace`
if [ $cnt -eq 0 ]; then
fail "Nothing found in trace"
fi
# powerpc uses .schedule
func="schedule"
x=`grep '^\.schedule$' available_filter_functions | wc -l`
if [ "$x" -eq 1 ]; then
func=".schedule"
fi
echo '** SET TRACEOFF'
echo "$func:traceoff" > set_ftrace_filter
cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 1 ]; then
fail "Did not find traceoff trigger"
fi
cnt=`cnt_trace`
sleep $SLEEP_TIME
cnt2=`cnt_trace`
if [ $cnt -ne $cnt2 ]; then
fail "Tracing is not stopped"
fi
on=`cat tracing_on`
if [ $on != "0" ]; then
fail "Tracing is not off"
fi
line1=`cat trace | tail -1`
sleep $SLEEP_TIME
line2=`cat trace | tail -1`
if [ "$line1" != "$line2" ]; then
fail "Tracing file is still changing"
fi
clear_trace
cnt=`cnt_trace`
if [ $cnt -ne 0 ]; then
fail "Tracing is still happeing"
fi
echo "!$func:traceoff" >> set_ftrace_filter
cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 0 ]; then
fail "traceoff trigger still exists"
fi
on=`cat tracing_on`
if [ $on != "0" ]; then
fail "Tracing is started again"
fi
echo "$func:traceon" > set_ftrace_filter
cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 1 ]; then
fail "traceon trigger not found"
fi
cnt=`cnt_trace`
if [ $cnt -eq 0 ]; then
fail "Tracing did not start"
fi
on=`cat tracing_on`
if [ $on != "1" ]; then
fail "Tracing was not enabled"
fi
echo "!$func:traceon" >> set_ftrace_filter
cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 0 ]; then
fail "traceon trigger still exists"
fi
check_sleep() {
val=$1
sleep $SLEEP_TIME
cat set_ftrace_filter
on=`cat tracing_on`
if [ $on != "$val" ]; then
fail "Expected tracing_on to be $val, but it was $on"
fi
}
echo "$func:traceoff:3" > set_ftrace_filter
check_sleep "0"
echo 1 > tracing_on
check_sleep "0"
echo 1 > tracing_on
check_sleep "0"
echo 1 > tracing_on
check_sleep "1"
echo "!$func:traceoff:0" > set_ftrace_filter
if grep -e traceon -e traceoff set_ftrace_filter; then
fail "Tracing on and off triggers still exist"
fi
disable_events
exit 0
...@@ -30,6 +30,27 @@ reset_events_filter() { # reset all current setting filters ...@@ -30,6 +30,27 @@ reset_events_filter() { # reset all current setting filters
done done
} }
reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
echo > set_ftrace_filter
grep -v '^#' set_ftrace_filter | while read t; do
tr=`echo $t | cut -d: -f2`
if [ "$tr" == "" ]; then
continue
fi
if [ $tr == "enable_event" -o $tr == "disable_event" ]; then
tr=`echo $t | cut -d: -f1-4`
limit=`echo $t | cut -d: -f5`
else
tr=`echo $t | cut -d: -f1-2`
limit=`echo $t | cut -d: -f3`
fi
if [ "$limit" != "unlimited" ]; then
tr="$tr:$limit"
fi
echo "!$tr" > set_ftrace_filter
done
}
disable_events() { disable_events() {
echo 0 > events/enable echo 0 > events/enable
} }
......
#!/bin/sh
# description: Kretprobe dynamic event with maxactive
[ -f kprobe_events ] || exit_unsupported # this is configurable
echo > kprobe_events
# Test if we successfully reject unknown messages
if echo 'a:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi
# Test if we successfully reject too big maxactive
if echo 'r1000000:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi
# Test if we successfully reject unparsable numbers for maxactive
if echo 'r10fuzz:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi
# Test for kretprobe with event name without maxactive
echo 'r:myprobeaccept inet_csk_accept' > kprobe_events
grep myprobeaccept kprobe_events
test -d events/kprobes/myprobeaccept
echo '-:myprobeaccept' >> kprobe_events
# Test for kretprobe with event name with a small maxactive
echo 'r10:myprobeaccept inet_csk_accept' > kprobe_events
grep myprobeaccept kprobe_events
test -d events/kprobes/myprobeaccept
echo '-:myprobeaccept' >> kprobe_events
# Test for kretprobe without event name without maxactive
echo 'r inet_csk_accept' > kprobe_events
grep inet_csk_accept kprobe_events
echo > kprobe_events
# Test for kretprobe without event name with a small maxactive
echo 'r10 inet_csk_accept' > kprobe_events
grep inet_csk_accept kprobe_events
echo > kprobe_events
clear_trace
#!/bin/sh #!/bin/sh
# description: event trigger - test event enable/disable trigger # description: event trigger - test event enable/disable trigger
# flags: instance
do_reset() { do_reset() {
reset_trigger reset_trigger
......
#!/bin/sh #!/bin/sh
# description: event trigger - test trigger filter # description: event trigger - test trigger filter
# flags: instance
do_reset() { do_reset() {
reset_trigger reset_trigger
......
#!/bin/sh #!/bin/sh
# description: event trigger - test histogram modifiers # description: event trigger - test histogram modifiers
# flags: instance
do_reset() { do_reset() {
reset_trigger reset_trigger
......
#!/bin/sh #!/bin/sh
# description: event trigger - test histogram trigger # description: event trigger - test histogram trigger
# flags: instance
do_reset() { do_reset() {
reset_trigger reset_trigger
......
#!/bin/sh #!/bin/sh
# description: event trigger - test multiple histogram triggers # description: event trigger - test multiple histogram triggers
# flags: instance
do_reset() { do_reset() {
reset_trigger reset_trigger
......