Commit c9584234 authored by Linus Torvalds

Merge tag 'trace-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:

 - Update to the way irqs and preemption are tracked via the trace event
   PC field

 - Fix handling of an event unregister failing due to a memory
   allocation failure. This is only triggered by failure injection, as
   an allocation of less than a page is pretty much guaranteed to
   succeed.

 - Do not show the useless "filter" or "enable" files for the "ftrace"
   trace system, as they have no effect.

 - Add a warning if kprobes are registered more than once.

 - Synthetic events now have their fields separated by semicolons. Old
   formats without semicolons will still work, but new features will
   require them.

 - New option to allow trace events to show %p without hashing in the
   trace file. The trace file can only be read by root, and reading the
   raw event buffer did not have any pointers hashed, so this does not
   expose anything new.

 - New tools/tracing directory in the tools tree, containing a new tool
   that reads sequential latency reports from the ftrace latency
   tracers.

 - Other minor fixes and cleanups.

* tag 'trace-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits)
  kprobes: Fix to delay the kprobes jump optimization
  tracing/tools: Add the latency-collector to tools directory
  tracing: Make hash-ptr option default
  tracing: Add ptr-hash option to show the hashed pointer value
  tracing: Update the stage 3 of trace event macro comment
  tracing: Show real address for trace event arguments
  selftests/ftrace: Add '!event' synthetic event syntax check
  selftests/ftrace: Update synthetic event syntax errors
  tracing: Add a backward-compatibility check for synthetic event creation
  tracing: Update synth command errors
  tracing: Rework synthetic event command parsing
  tracing/dynevent: Delegate parsing to create function
  kprobes: Warn if the kprobe is reregistered
  ftrace: Remove unused ftrace_force_update()
  tracepoints: Code clean up
  tracepoints: Do not punish non static call users
  tracepoints: Remove unnecessary "data_args" macro parameter
  tracing: Do not create "enable" or "filter" files for ftrace event subsystem
  kernel: trace: preemptirq_delay_test: add cpu affinity
  tracepoint: Do not fail unregistering a probe due to memory failure
  ...
parents 3a36281a c85c9a2c
@@ -1159,6 +1159,12 @@ Here are the available options:
 	  This simulates the original behavior of the trace file.
 	  When the file is closed, tracing will be enabled again.
 
+  hash-ptr
+	  When set, "%p" in the event printk format displays the
+	  hashed pointer value instead of real address.
+	  This will be useful if you want to find out which hashed
+	  value is corresponding to the real value in trace log.
+
   record-cmd
 	  When any event or tracer is enabled, a hook is enabled
 	  in the sched_switch trace point to fill comm cache
......
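As context for the documentation change above: hash-ptr is a regular trace option under tracefs, so it is toggled like any other option file. A minimal user-space sketch, assuming tracefs is mounted at /sys/kernel/tracing (the mount point is distribution-specific) and root privileges:

/* Hedged sketch: toggle the "hash-ptr" trace option from user space.
 * Assumes tracefs is mounted at /sys/kernel/tracing. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_hash_ptr(int enable)
{
	/* Option files take "0" or "1"; writing requires root. */
	int fd = open("/sys/kernel/tracing/options/hash-ptr", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, enable ? "1" : "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	return set_hash_ptr(0);	/* 0 = show real %p addresses in "trace" */
}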
@@ -485,7 +485,6 @@ struct dyn_ftrace {
 	struct dyn_arch_ftrace	arch;
 };
 
-int ftrace_force_update(void);
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 			 int remove, int reset);
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
@@ -740,7 +739,6 @@ extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else /* CONFIG_DYNAMIC_FTRACE */
 static inline int skip_trace(unsigned long ip) { return 0; }
-static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_module_init(struct module *mod) { }
......
@@ -34,8 +34,9 @@ int unregister_ftrace_export(struct trace_export *export);
 struct trace_array;
 
 void trace_printk_init_buffers(void);
+__printf(3, 4)
 int trace_array_printk(struct trace_array *tr, unsigned long ip,
 		       const char *fmt, ...);
 int trace_array_init_printk(struct trace_array *tr);
 void trace_array_put(struct trace_array *tr);
 struct trace_array *trace_array_get_by_name(const char *name);
......
@@ -55,6 +55,8 @@ struct trace_event;
 int trace_raw_output_prep(struct trace_iterator *iter,
 			  struct trace_event *event);
+extern __printf(2, 3)
+void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
 
 /*
  * The trace entry - the most basic unit of tracing. This is what
@@ -87,6 +89,8 @@ struct trace_iterator {
 	unsigned long	iter_flags;
 	void		*temp;	/* temp holder */
 	unsigned int	temp_size;
+	char		*fmt;	/* modified format holder */
+	unsigned int	fmt_size;
 
 	/* trace_seq for __print_flags() and __print_symbolic() etc. */
 	struct trace_seq	tmp_seq;
@@ -148,17 +152,75 @@ enum print_line_t {
 
 enum print_line_t trace_handle_return(struct trace_seq *s);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-				  unsigned short type,
-				  unsigned long flags,
-				  int pc);
+static inline void tracing_generic_entry_update(struct trace_entry *entry,
+						unsigned short type,
+						unsigned int trace_ctx)
+{
+	entry->preempt_count	= trace_ctx & 0xff;
+	entry->pid		= current->pid;
+	entry->type		= type;
+	entry->flags		= trace_ctx >> 16;
+}
+
+unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
+
+enum trace_flag_type {
+	TRACE_FLAG_IRQS_OFF		= 0x01,
+	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
+	TRACE_FLAG_NEED_RESCHED		= 0x04,
+	TRACE_FLAG_HARDIRQ		= 0x08,
+	TRACE_FLAG_SOFTIRQ		= 0x10,
+	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
+	TRACE_FLAG_NMI			= 0x40,
+};
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
+		TRACE_FLAG_IRQS_OFF : 0;
+	return tracing_gen_ctx_irq_test(irq_status);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+	unsigned long irqflags;
+
+	local_save_flags(irqflags);
+	return tracing_gen_ctx_flags(irqflags);
+}
+#else
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+#endif
+
+static inline unsigned int tracing_gen_ctx_dec(void)
+{
+	unsigned int trace_ctx;
+
+	trace_ctx = tracing_gen_ctx();
+	/*
+	 * Subtract one from the preemption counter if preemption is enabled,
+	 * see trace_event_buffer_reserve() for details.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPTION))
+		trace_ctx--;
+	return trace_ctx;
+}
+
 struct trace_event_file;
 
 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
 				struct trace_event_file *trace_file,
 				int type, unsigned long len,
-				unsigned long flags, int pc);
+				unsigned int trace_ctx);
 
 #define TRACE_RECORD_CMDLINE	BIT(0)
 #define TRACE_RECORD_TGID	BIT(1)
@@ -232,8 +294,7 @@ struct trace_event_buffer {
 	struct ring_buffer_event	*event;
 	struct trace_event_file		*trace_file;
 	void				*entry;
-	unsigned long			flags;
-	int				pc;
+	unsigned int			trace_ctx;
 	struct pt_regs			*regs;
 };
......
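As context for the header change above: the old (unsigned long flags, int pc) pair is folded into a single unsigned int trace_ctx, with the preempt count in the low byte and the trace_flag_type bits starting at bit 16, exactly as tracing_generic_entry_update() unpacks them. A stand-alone user-space sketch of that packing (illustrative only, not kernel code):

/* Userspace sketch of the trace_ctx layout used above: preempt count in
 * the low byte, trace_flag_type bits shifted up to bit 16, so one
 * unsigned int replaces the old (flags, pc) pair. */
#include <assert.h>
#include <stdio.h>

#define TRACE_FLAG_IRQS_OFF	0x01
#define TRACE_FLAG_HARDIRQ	0x08

static unsigned int pack_trace_ctx(unsigned char preempt_count,
				   unsigned char flags)
{
	return preempt_count | ((unsigned int)flags << 16);
}

int main(void)
{
	unsigned int ctx = pack_trace_ctx(2, TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ);

	/* The unpacking mirrors tracing_generic_entry_update(). */
	assert((ctx & 0xff) == 2);	/* becomes entry->preempt_count */
	assert((ctx >> 16) == (TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ));
	printf("trace_ctx = %#x\n", ctx);
	return 0;
}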
@@ -152,25 +152,28 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #ifdef TRACEPOINTS_ENABLED
 
 #ifdef CONFIG_HAVE_STATIC_CALL
-#define __DO_TRACE_CALL(name)	static_call(tp_func_##name)
+#define __DO_TRACE_CALL(name, args)					\
+	do {								\
+		struct tracepoint_func *it_func_ptr;			\
+		void *__data;						\
+		it_func_ptr =						\
+			rcu_dereference_raw((&__tracepoint_##name)->funcs); \
+		if (it_func_ptr) {					\
+			__data = (it_func_ptr)->data;			\
+			static_call(tp_func_##name)(__data, args);	\
+		}							\
+	} while (0)
 #else
-#define __DO_TRACE_CALL(name)	__traceiter_##name
+#define __DO_TRACE_CALL(name, args)	__traceiter_##name(NULL, args)
 #endif /* CONFIG_HAVE_STATIC_CALL */
 
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
- *
- * Note, the proto and args passed in includes "__data" as the first parameter.
- * The reason for this is to handle the "void" prototype. If a tracepoint
- * has a "void" prototype, then it is invalid to declare a function
- * as "(void *, void)".
  */
-#define __DO_TRACE(name, proto, args, cond, rcuidle)			\
+#define __DO_TRACE(name, args, cond, rcuidle)				\
 	do {								\
-		struct tracepoint_func *it_func_ptr;			\
 		int __maybe_unused __idx = 0;				\
-		void *__data;						\
 									\
 		if (!(cond))						\
 			return;						\
@@ -190,12 +193,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 			rcu_irq_enter_irqson();				\
 		}							\
 									\
-		it_func_ptr =						\
-			rcu_dereference_raw((&__tracepoint_##name)->funcs); \
-		if (it_func_ptr) {					\
-			__data = (it_func_ptr)->data;			\
-			__DO_TRACE_CALL(name)(args);			\
-		}							\
+		__DO_TRACE_CALL(name, TP_ARGS(args));			\
 									\
 		if (rcuidle) {						\
 			rcu_irq_exit_irqson();				\
@@ -206,17 +204,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	} while (0)
 
 #ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond)			\
 	static inline void trace_##name##_rcuidle(proto)		\
 	{								\
 		if (static_key_false(&__tracepoint_##name.key))		\
 			__DO_TRACE(name,				\
-				TP_PROTO(data_proto),			\
-				TP_ARGS(data_args),			\
+				TP_ARGS(args),				\
 				TP_CONDITION(cond), 1);			\
 	}
 #else
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
+#define __DECLARE_TRACE_RCU(name, proto, args, cond)
 #endif
 
 /*
@@ -231,7 +228,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * even when this tracepoint is off. This code has no purpose other than
  * poking RCU a bit.
  */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
 	extern int __traceiter_##name(data_proto);			\
 	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
 	extern struct tracepoint __tracepoint_##name;			\
@@ -239,8 +236,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	{								\
 		if (static_key_false(&__tracepoint_##name.key))		\
 			__DO_TRACE(name,				\
-				TP_PROTO(data_proto),			\
-				TP_ARGS(data_args),			\
+				TP_ARGS(args),				\
 				TP_CONDITION(cond), 0);			\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
 			rcu_read_lock_sched_notrace();			\
@@ -249,7 +245,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 		}							\
 	}								\
 	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
-			    PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
+			    PARAMS(cond))				\
 	static inline int						\
 	register_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
@@ -309,7 +305,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 			rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
 		if (it_func_ptr) {					\
 			do {						\
-				it_func = (it_func_ptr)->func;		\
+				it_func = READ_ONCE((it_func_ptr)->func); \
 				__data = (it_func_ptr)->data;		\
 				((void(*)(void *, proto))(it_func))(__data, args); \
 			} while ((++it_func_ptr)->func);		\
@@ -332,7 +328,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 
 #else /* !TRACEPOINTS_ENABLED */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
 	static inline void trace_##name(proto)				\
 	{ }								\
 	static inline void trace_##name##_rcuidle(proto)		\
@@ -412,14 +408,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define DECLARE_TRACE(name, proto, args)				\
 	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
 			cpu_online(raw_smp_processor_id()),		\
-			PARAMS(void *__data, proto),			\
-			PARAMS(__data, args))
+			PARAMS(void *__data, proto))
 
 #define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
 	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
 			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
-			PARAMS(void *__data, proto),			\
-			PARAMS(__data, args))
+			PARAMS(void *__data, proto))
 
 #define TRACE_EVENT_FLAGS(event, flag)
......
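For readers following the tracepoint change above: __DO_TRACE_CALL() now looks up the funcs array and passes the registered data pointer itself, so a probe always receives its data as the first argument, and the static_call fast path invokes a single probe directly. A simplified plain-C model of that single-callback path (names are illustrative, not the kernel's):

/* Plain-C model of the dispatch above: the probe gets its registered
 * data first, and the fast path calls the first funcs[] entry directly. */
#include <stdio.h>

struct tracepoint_func {
	void (*func)(void *data, int arg);	/* probe with data first */
	void *data;
};

static void my_probe(void *data, int arg)
{
	printf("%s: arg=%d\n", (const char *)data, arg);
}

static struct tracepoint_func funcs[] = {
	{ my_probe, "probe-data" },
	{ NULL, NULL },			/* array is NULL-func terminated */
};

static void do_trace(int arg)
{
	struct tracepoint_func *it_func_ptr = funcs;

	if (it_func_ptr)	/* single-callback fast path */
		it_func_ptr->func(it_func_ptr->data, arg);
}

int main(void)
{
	do_trace(42);
	return 0;
}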
@@ -231,9 +231,11 @@ TRACE_MAKE_SYSTEM_STR();
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct trace_event_raw_<call> *field; <-- defined in stage 1
- *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
- *	int ret;
+ *
+ * -------(for event)-------
+ *
+ *	struct trace_entry *entry;
 *
 *	entry = iter->ent;
 *
@@ -245,14 +247,23 @@ TRACE_MAKE_SYSTEM_STR();
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, "%s: ", <call>);
- *	if (ret)
- *		ret = trace_seq_printf(s, <TP_printk> "\n");
- *	if (!ret)
- *		return TRACE_TYPE_PARTIAL_LINE;
- *
- *	return TRACE_TYPE_HANDLED;
- * }
+ *	return trace_output_call(iter, <call>, <TP_printk> "\n");
+ *
+ * ------(or, for event class)------
+ *
+ *	int ret;
+ *
+ *	field = (typeof(field))iter->ent;
+ *
+ *	ret = trace_raw_output_prep(iter, trace_event);
+ *	if (ret != TRACE_TYPE_HANDLED)
+ *		return ret;
+ *
+ *	trace_event_printf(iter, <TP_printk> "\n");
+ *
+ *	return trace_handle_return(s);
+ * -------
+ * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
@@ -364,7 +375,7 @@ trace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 	if (ret != TRACE_TYPE_HANDLED)					\
 		return ret;						\
 									\
-	trace_seq_printf(s, print);					\
+	trace_event_printf(iter, print);				\
 									\
 	return trace_handle_return(s);					\
 }									\
......
@@ -861,7 +861,6 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 	cpus_read_unlock();
 }
 
-#ifdef CONFIG_SYSCTL
 static void optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -887,6 +886,7 @@ static void optimize_all_kprobes(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+#ifdef CONFIG_SYSCTL
 static void unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -1520,13 +1520,16 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 	return ap;
 }
 
-/* Return error if the kprobe is being re-registered */
-static inline int check_kprobe_rereg(struct kprobe *p)
+/*
+ * Warn and return error if the kprobe is being re-registered since
+ * there must be a software bug.
+ */
+static inline int warn_kprobe_rereg(struct kprobe *p)
 {
 	int ret = 0;
 
 	mutex_lock(&kprobe_mutex);
-	if (__get_valid_kprobe(p))
+	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
 		ret = -EINVAL;
 	mutex_unlock(&kprobe_mutex);
@@ -1614,7 +1617,7 @@ int register_kprobe(struct kprobe *p)
 		return PTR_ERR(addr);
 	p->addr = addr;
 
-	ret = check_kprobe_rereg(p);
+	ret = warn_kprobe_rereg(p);
 	if (ret)
 		return ret;
@@ -1995,7 +1998,7 @@ int register_kretprobe(struct kretprobe *rp)
 		return ret;
 
 	/* If only rp->kp.addr is specified, check reregistering kprobes */
-	if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
+	if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
 		return -EINVAL;
 
 	if (kretprobe_blacklist_size) {
@@ -2497,18 +2500,14 @@ static int __init init_kprobes(void)
 		}
 	}
 
-#if defined(CONFIG_OPTPROBES)
-#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
-	/* Init kprobe_optinsn_slots */
-	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-#endif
-	/* By default, kprobes can be optimized */
-	kprobes_allow_optimization = true;
-#endif
-
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;
 
+#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+	/* Init kprobe_optinsn_slots for allocation */
+	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+
 	err = arch_init_kprobes();
 	if (!err)
 		err = register_die_notifier(&kprobe_exceptions_nb);
@@ -2523,6 +2522,21 @@ static int __init init_kprobes(void)
 }
 early_initcall(init_kprobes);
 
+#if defined(CONFIG_OPTPROBES)
+static int __init init_optprobes(void)
+{
+	/*
+	 * Enable kprobe optimization - this kicks the optimizer which
+	 * depends on synchronize_rcu_tasks() and ksoftirqd, that is
+	 * not spawned in early initcall. So delay the optimization.
+	 */
+	optimize_all_kprobes();
+
+	return 0;
+}
+subsys_initcall(init_optprobes);
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 static void report_probe(struct seq_file *pi, struct kprobe *p,
 		const char *sym, int offset, char *modname, struct kprobe *pp)
......
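To illustrate the kprobes change above: registering the same kprobe object twice is now treated as a software bug, triggering a WARN and returning -EINVAL. A minimal, hypothetical demo module (assuming kernel_clone is a traceable symbol on the target kernel; handlers are optional and omitted here):

/* Hedged sketch of the new behavior: a second register_kprobe() on the
 * same object warns and fails with -EINVAL. */
#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe kp = {
	.symbol_name = "kernel_clone",	/* assumed traceable symbol */
};

static int __init rereg_demo_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;
	/* Second registration of the same object: warns, returns -EINVAL */
	ret = register_kprobe(&kp);
	pr_info("re-register returned %d\n", ret);
	return 0;
}

static void __exit rereg_demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(rereg_demo_init);
module_exit(rereg_demo_exit);
MODULE_LICENSE("GPL");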
@@ -545,7 +545,7 @@ config KPROBE_EVENTS_ON_NOTRACE
 	  using kprobe events.
 
 	  If kprobes can use ftrace instead of breakpoint, ftrace related
-	  functions are protected from kprobe-events to prevent an infinit
+	  functions are protected from kprobe-events to prevent an infinite
 	  recursion or any unexpected execution path which leads to a kernel
 	  crash.
 
@@ -886,6 +886,10 @@ config PREEMPTIRQ_DELAY_TEST
 	  irq-disabled critical sections for 500us:
 	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
 
+	  What's more, if you want to attach the test on the cpu which the
+	  latency tracer is running on, specify cpu_affinity=cpu_num at the
+	  end of the command.
+
 	  If unsure, say N
 
 config SYNTH_EVENT_GEN_TEST
......
@@ -72,17 +72,17 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 	struct blk_io_trace *t;
 	struct ring_buffer_event *event = NULL;
 	struct trace_buffer *buffer = NULL;
-	int pc = 0;
+	unsigned int trace_ctx = 0;
 	int cpu = smp_processor_id();
 	bool blk_tracer = blk_tracer_enabled;
 	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 
 	if (blk_tracer) {
 		buffer = blk_tr->array_buffer.buffer;
-		pc = preempt_count();
+		trace_ctx = tracing_gen_ctx_flags(0);
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + len + cgid_len,
-						  0, pc);
+						  trace_ctx);
 		if (!event)
 			return;
 		t = ring_buffer_event_data(event);
@@ -107,7 +107,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 
 		if (blk_tracer)
-			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 	}
 }
@@ -222,8 +222,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	struct blk_io_trace *t;
 	unsigned long flags = 0;
 	unsigned long *sequence;
+	unsigned int trace_ctx = 0;
 	pid_t pid;
-	int cpu, pc = 0;
+	int cpu;
 	bool blk_tracer = blk_tracer_enabled;
 	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 
@@ -252,10 +253,10 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		tracing_record_cmdline(current);
 
 		buffer = blk_tr->array_buffer.buffer;
-		pc = preempt_count();
+		trace_ctx = tracing_gen_ctx_flags(0);
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + pdu_len + cgid_len,
-						  0, pc);
+						  trace_ctx);
 		if (!event)
 			return;
 		t = ring_buffer_event_data(event);
@@ -301,7 +302,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 
 		if (blk_tracer) {
-			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 			return;
 		}
 	}
......
@@ -21,13 +21,16 @@
 static ulong delay = 100;
 static char test_mode[12] = "irq";
 static uint burst_size = 1;
+static int cpu_affinity = -1;
 
 module_param_named(delay, delay, ulong, 0444);
 module_param_string(test_mode, test_mode, 12, 0444);
 module_param_named(burst_size, burst_size, uint, 0444);
+module_param_named(cpu_affinity, cpu_affinity, int, 0444);
 MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
 MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
 MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
+MODULE_PARM_DESC(cpu_affinity, "Cpu num test is running on");
 
 static struct completion done;
 
@@ -36,7 +39,9 @@ static struct completion done;
 static void busy_wait(ulong time)
 {
 	u64 start, end;
+
 	start = trace_clock_local();
+
 	do {
 		end = trace_clock_local();
 		if (kthread_should_stop())
@@ -47,6 +52,7 @@ static void busy_wait(ulong time)
 static __always_inline void irqoff_test(void)
 {
 	unsigned long flags;
+
 	local_irq_save(flags);
 	busy_wait(delay);
 	local_irq_restore(flags);
@@ -113,6 +119,14 @@ static int preemptirq_delay_run(void *data)
 {
 	int i;
 	int s = MIN(burst_size, NR_TEST_FUNCS);
+	struct cpumask cpu_mask;
+
+	if (cpu_affinity > -1) {
+		cpumask_clear(&cpu_mask);
+		cpumask_set_cpu(cpu_affinity, &cpu_mask);
+		if (set_cpus_allowed_ptr(current, &cpu_mask))
+			pr_err("cpu_affinity:%d, failed\n", cpu_affinity);
+	}
 
 	for (i = 0; i < s; i++)
 		(testfuncs[i])(i);
......
@@ -1112,8 +1112,7 @@ static struct list_head *rb_list_head(struct list_head *list)
  * its flags will be non zero.
  */
 static inline int
-rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
-		struct buffer_page *page, struct list_head *list)
+rb_is_head_page(struct buffer_page *page, struct list_head *list)
 {
 	unsigned long val;
@@ -1142,8 +1141,7 @@ static bool rb_is_reader_page(struct buffer_page *page)
 /*
  * rb_set_list_to_head - set a list_head to be pointing to head.
  */
-static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
-				struct list_head *list)
+static void rb_set_list_to_head(struct list_head *list)
 {
 	unsigned long *ptr;
@@ -1166,7 +1164,7 @@ static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 	/*
 	 * Set the previous list pointer to have the HEAD flag.
 	 */
-	rb_set_list_to_head(cpu_buffer, head->list.prev);
+	rb_set_list_to_head(head->list.prev);
 }
 
 static void rb_list_head_clear(struct list_head *list)
@@ -1241,8 +1239,7 @@ static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 			old_flag, RB_PAGE_NORMAL);
 }
 
-static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
-			       struct buffer_page **bpage)
+static inline void rb_inc_page(struct buffer_page **bpage)
 {
 	struct list_head *p = rb_list_head((*bpage)->list.next);
@@ -1274,11 +1271,11 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 */
 	for (i = 0; i < 3; i++) {
 		do {
-			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
+			if (rb_is_head_page(page, page->list.prev)) {
 				cpu_buffer->head_page = page;
 				return page;
 			}
-			rb_inc_page(cpu_buffer, &page);
+			rb_inc_page(&page);
 		} while (page != head);
 	}
@@ -1824,7 +1821,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 		cond_resched();
 
 		to_remove_page = tmp_iter_page;
-		rb_inc_page(cpu_buffer, &tmp_iter_page);
+		rb_inc_page(&tmp_iter_page);
 
 		/* update the counters */
 		page_entries = rb_page_entries(to_remove_page);
@@ -2062,10 +2059,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 
 		put_online_cpus();
 	} else {
-		/* Make sure this CPU has been initialized */
-		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
-			goto out;
-
 		cpu_buffer = buffer->buffers[cpu_id];
 
 		if (nr_pages == cpu_buffer->nr_pages)
@@ -2271,7 +2264,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
 	if (iter->head_page == cpu_buffer->reader_page)
 		iter->head_page = rb_set_head_page(cpu_buffer);
 	else
-		rb_inc_page(cpu_buffer, &iter->head_page);
+		rb_inc_page(&iter->head_page);
 
 	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
 	iter->head = 0;
@@ -2374,7 +2367,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 	 * want the outer most commit to reset it.
 	 */
 	new_head = next_page;
-	rb_inc_page(cpu_buffer, &new_head);
+	rb_inc_page(&new_head);
 
 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
 				    RB_PAGE_NORMAL);
@@ -2526,7 +2519,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 	next_page = tail_page;
 
-	rb_inc_page(cpu_buffer, &next_page);
+	rb_inc_page(&next_page);
 
 	/*
 	 * If for some reason, we had an interrupt storm that made
@@ -2552,7 +2545,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	 * the buffer, unless the commit page is still on the
 	 * reader page.
 	 */
-	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
+	if (rb_is_head_page(next_page, &tail_page->list)) {
 
 		/*
 		 * If the commit is not on the reader page, then
@@ -2583,7 +2576,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 			 * have filled up the buffer with events
 			 * from interrupts and such, and wrapped.
 			 *
-			 * Note, if the tail page is also the on the
+			 * Note, if the tail page is also on the
 			 * reader_page, we let it move out.
 			 */
 			if (unlikely((cpu_buffer->commit_page !=
@@ -2879,7 +2872,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 			return;
 		local_set(&cpu_buffer->commit_page->page->commit,
 			  rb_page_write(cpu_buffer->commit_page));
-		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
+		rb_inc_page(&cpu_buffer->commit_page);
 		/* add barrier to keep gcc from optimizing too much */
 		barrier();
 	}
@@ -3638,14 +3631,14 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 	 * Because the commit page may be on the reader page we
 	 * start with the next page and check the end loop there.
 	 */
-	rb_inc_page(cpu_buffer, &bpage);
+	rb_inc_page(&bpage);
 	start = bpage;
 	do {
 		if (bpage->page == (void *)addr) {
 			local_dec(&bpage->entries);
 			return;
 		}
-		rb_inc_page(cpu_buffer, &bpage);
+		rb_inc_page(&bpage);
 	} while (bpage != start);
 
 	/* commit not part of this buffer?? */
@@ -4367,7 +4360,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->pages = reader->list.prev;
 
 	/* The reader page will be pointing to the new head */
-	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
+	rb_set_list_to_head(&cpu_buffer->reader_page->list);
 
 	/*
 	 * We want to make sure we read the overruns after we set up our
@@ -4406,7 +4399,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Now make the new head point back to the reader page.
 	 */
 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
-	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
+	rb_inc_page(&cpu_buffer->head_page);
 
 	local_inc(&cpu_buffer->pages_read);
......
@@ -136,25 +136,6 @@ struct kretprobe_trace_entry_head {
 	unsigned long		ret_ip;
 };
 
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF		- interrupts were disabled
- *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
- *  NEED_RESCHED	- reschedule is requested
- *  HARDIRQ		- inside an interrupt handler
- *  SOFTIRQ		- inside a softirq handler
- */
-enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF		= 0x01,
-	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
-	TRACE_FLAG_NEED_RESCHED		= 0x04,
-	TRACE_FLAG_HARDIRQ		= 0x08,
-	TRACE_FLAG_SOFTIRQ		= 0x10,
-	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
-	TRACE_FLAG_NMI			= 0x40,
-};
-
 #define TRACE_BUF_SIZE		1024
 
 struct trace_array;
@@ -589,8 +570,7 @@ struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
-			  unsigned long flags,
-			  int pc);
+			  unsigned int trace_ctx);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -601,6 +581,8 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 					struct ring_buffer_event *event);
 
+const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
+
 int trace_empty(struct trace_iterator *iter);
 
 void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -615,11 +597,11 @@ unsigned long trace_total_entries(struct trace_array *tr);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned long flags, int pc);
+		    unsigned int trace_ctx);
 void trace_graph_function(struct trace_array *tr,
 			  unsigned long ip,
 			  unsigned long parent_ip,
-			  unsigned long flags, int pc);
+			  unsigned int trace_ctx);
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
@@ -687,11 +669,10 @@ static inline void latency_fsnotify(struct trace_array *tr) { }
 #endif
 
 #ifdef CONFIG_STACKTRACE
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-		   int pc);
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
 #else
-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
-				 int skip, int pc)
+static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+				 int skip)
 {
 }
 #endif /* CONFIG_STACKTRACE */
@@ -831,10 +812,10 @@ extern void graph_trace_open(struct trace_iterator *iter);
 extern void graph_trace_close(struct trace_iterator *iter);
 extern int __trace_graph_entry(struct trace_array *tr,
 			       struct ftrace_graph_ent *trace,
-			       unsigned long flags, int pc);
+			       unsigned int trace_ctx);
 extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
-				 unsigned long flags, int pc);
+				 unsigned int trace_ctx);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
@@ -1194,6 +1175,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 		C(MARKERS,		"markers"),		\
 		C(EVENT_FORK,		"event-fork"),		\
 		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
+		C(HASH_PTR,		"hash-ptr"),	/* Print hashed pointer */ \
 		FUNCTION_FLAGS					\
 		FGRAPH_FLAGS					\
 		STACK_FLAGS					\
@@ -1297,15 +1279,15 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,
-				     unsigned long flags, int pc,
+				     unsigned int trace_ctx,
 				     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
 					      struct trace_buffer *buffer,
 					      struct ring_buffer_event *event,
-					      unsigned long flags, int pc)
+					      unsigned int trace_ctx)
 {
-	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
 }
 
 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -1366,8 +1348,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
@@ -1377,12 +1358,12 @@ static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct trace_buffer *buffer,
 			    struct ring_buffer_event *event,
-			    void *entry, unsigned long irq_flags, int pc)
+			    void *entry, unsigned int trace_ctx)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
 
 	if (tt)
 		event_triggers_post_call(file, tt);
@@ -1394,8 +1375,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
@@ -1408,14 +1388,14 @@ static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
 				 struct trace_buffer *buffer,
 				 struct ring_buffer_event *event,
-				 void *entry, unsigned long irq_flags, int pc,
+				 void *entry, unsigned int trace_ctx,
 				 struct pt_regs *regs)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-						irq_flags, pc, regs);
+						trace_ctx, regs);
 
 	if (tt)
 		event_triggers_post_call(file, tt);
@@ -1830,10 +1810,9 @@ extern int tracing_set_cpumask(struct trace_array *tr,
 
 #define MAX_EVENT_NAME_LEN	64
 
-extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
 extern ssize_t trace_parse_run_command(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos,
-		int (*createfn)(int, char**));
+		int (*createfn)(const char *));
 
 extern unsigned int err_pos(char *cmd, const char *str);
 extern void tracing_log_err(struct trace_array *tr,
......
@@ -37,7 +37,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
 	unsigned long flags;
-	int pc;
+	unsigned int trace_ctx;
 	const char *p;
 
 	if (current->trace_recursion & TRACE_BRANCH_BIT)
@@ -59,10 +59,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 	if (atomic_read(&data->disabled))
 		goto out;
 
-	pc = preempt_count();
+	trace_ctx = tracing_gen_ctx_flags(flags);
 	buffer = tr->array_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
-					  sizeof(*entry), flags, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event)
 		goto out;
......
@@ -31,23 +31,31 @@ int dyn_event_register(struct dyn_event_operations *ops)
 	return 0;
 }
 
-int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
+int dyn_event_release(const char *raw_command, struct dyn_event_operations *type)
 {
 	struct dyn_event *pos, *n;
 	char *system = NULL, *event, *p;
-	int ret = -ENOENT;
+	int argc, ret = -ENOENT;
+	char **argv;
+
+	argv = argv_split(GFP_KERNEL, raw_command, &argc);
+	if (!argv)
+		return -ENOMEM;
 
 	if (argv[0][0] == '-') {
-		if (argv[0][1] != ':')
-			return -EINVAL;
+		if (argv[0][1] != ':') {
+			ret = -EINVAL;
+			goto out;
+		}
 		event = &argv[0][2];
 	} else {
 		event = strchr(argv[0], ':');
-		if (!event)
-			return -EINVAL;
+		if (!event) {
+			ret = -EINVAL;
+			goto out;
+		}
 		event++;
 	}
-	argc--; argv++;
 
 	p = strchr(event, '/');
 	if (p) {
@@ -63,7 +71,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
 		if (type && type != pos->ops)
 			continue;
 		if (!pos->ops->match(system, event,
-				argc, (const char **)argv, pos))
+				argc - 1, (const char **)argv + 1, pos))
 			continue;
 
 		ret = pos->ops->free(pos);
@@ -71,21 +79,22 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
 			break;
 	}
 	mutex_unlock(&event_mutex);
+out:
+	argv_free(argv);
 	return ret;
 }
 
-static int create_dyn_event(int argc, char **argv)
+static int create_dyn_event(const char *raw_command)
 {
 	struct dyn_event_operations *ops;
 	int ret = -ENODEV;
 
-	if (argv[0][0] == '-' || argv[0][0] == '!')
-		return dyn_event_release(argc, argv, NULL);
+	if (raw_command[0] == '-' || raw_command[0] == '!')
+		return dyn_event_release(raw_command, NULL);
 
 	mutex_lock(&dyn_event_ops_mutex);
 	list_for_each_entry(ops, &dyn_event_ops_list, list) {
-		ret = ops->create(argc, (const char **)argv);
+		ret = ops->create(raw_command);
 		if (!ret || ret != -ECANCELED)
 			break;
 	}
......
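To illustrate the dyn_event change above: create() callbacks now receive the raw command string and do their own tokenizing, typically via argv_split()/argv_free() as dyn_event_release() does. A hypothetical callback sketch (not one of the in-tree implementations):

/* Hedged sketch of a post-change create() callback: it parses the raw
 * command itself.  "example" is a made-up command keyword. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_create(const char *raw_command)
{
	int argc, ret = -ECANCELED;	/* -ECANCELED: let other ops try */
	char **argv;

	argv = argv_split(GFP_KERNEL, raw_command, &argc);
	if (!argv)
		return -ENOMEM;

	/* Only claim commands that start with our keyword. */
	if (argc && strcmp(argv[0], "example") == 0)
		ret = 0;	/* parse the fields and create the event here */

	argv_free(argv);
	return ret;
}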
@@ -39,7 +39,7 @@ struct dyn_event;
 */
 struct dyn_event_operations {
 	struct list_head	list;
-	int (*create)(int argc, const char *argv[]);
+	int (*create)(const char *raw_command);
 	int (*show)(struct seq_file *m, struct dyn_event *ev);
 	bool (*is_busy)(struct dyn_event *ev);
 	int (*free)(struct dyn_event *ev);
@@ -97,7 +97,7 @@ void *dyn_event_seq_start(struct seq_file *m, loff_t *pos);
 void *dyn_event_seq_next(struct seq_file *m, void *v, loff_t *pos);
 void dyn_event_seq_stop(struct seq_file *m, void *v);
 int dyn_events_release_all(struct dyn_event_operations *type);
-int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type);
+int dyn_event_release(const char *raw_command, struct dyn_event_operations *type);
 
 /*
 * for_each_dyn_event - iterate over the dyn_event list
......
@@ -421,11 +421,8 @@ NOKPROBE_SYMBOL(perf_trace_buf_alloc);
 void perf_trace_buf_update(void *record, u16 type)
 {
 	struct trace_entry *entry = record;
-	int pc = preempt_count();
-	unsigned long flags;
 
-	local_save_flags(flags);
-	tracing_generic_entry_update(entry, type, flags, pc);
+	tracing_generic_entry_update(entry, type, tracing_gen_ctx());
 }
 NOKPROBE_SYMBOL(perf_trace_buf_update);
......
...@@ -258,22 +258,19 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, ...@@ -258,22 +258,19 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
trace_event_ignore_this_pid(trace_file)) trace_event_ignore_this_pid(trace_file))
return NULL; return NULL;
local_save_flags(fbuffer->flags);
fbuffer->pc = preempt_count();
/* /*
* If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
* preemption (adding one to the preempt_count). Since we are * preemption (adding one to the preempt_count). Since we are
* interested in the preempt_count at the time the tracepoint was * interested in the preempt_count at the time the tracepoint was
* hit, we need to subtract one to offset the increment. * hit, we need to subtract one to offset the increment.
*/ */
if (IS_ENABLED(CONFIG_PREEMPTION)) fbuffer->trace_ctx = tracing_gen_ctx_dec();
fbuffer->pc--;
fbuffer->trace_file = trace_file; fbuffer->trace_file = trace_file;
fbuffer->event = fbuffer->event =
trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
event_call->event.type, len, event_call->event.type, len,
fbuffer->flags, fbuffer->pc); fbuffer->trace_ctx);
if (!fbuffer->event) if (!fbuffer->event)
return NULL; return NULL;
...@@ -2101,16 +2098,20 @@ event_subsystem_dir(struct trace_array *tr, const char *name, ...@@ -2101,16 +2098,20 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
dir->subsystem = system; dir->subsystem = system;
file->system = dir; file->system = dir;
entry = tracefs_create_file("filter", 0644, dir->entry, dir, /* the ftrace system is special, do not create enable or filter files */
&ftrace_subsystem_filter_fops); if (strcmp(name, "ftrace") != 0) {
if (!entry) {
kfree(system->filter);
system->filter = NULL;
pr_warn("Could not create tracefs '%s/filter' entry\n", name);
}
trace_create_file("enable", 0644, dir->entry, dir, entry = tracefs_create_file("filter", 0644, dir->entry, dir,
&ftrace_system_enable_fops); &ftrace_subsystem_filter_fops);
if (!entry) {
kfree(system->filter);
system->filter = NULL;
pr_warn("Could not create tracefs '%s/filter' entry\n", name);
}
trace_create_file("enable", 0644, dir->entry, dir,
&ftrace_system_enable_fops);
}
list_add(&dir->list, &tr->systems); list_add(&dir->list, &tr->systems);
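This restructuring is the "do not create enable or filter files for the ftrace event subsystem" item from the merge log: those files never had any effect for the special ftrace system, so its directory now omits them. The guard reduces to a name check; a trivial sketch of the shape, with printouts standing in for tracefs file creation:

#include <stdio.h>
#include <string.h>

/* The reserved subsystem gets no per-system control files. */
static void create_system_files(const char *name)
{
	if (strcmp(name, "ftrace") == 0)
		return;	/* "enable"/"filter" would do nothing here */

	printf("create %s/filter\n", name);
	printf("create %s/enable\n", name);
}

int main(void)
{
	create_system_files("sched");	/* gets both files */
	create_system_files("ftrace");	/* gets neither */
	return 0;
}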
...@@ -3679,12 +3680,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip, ...@@ -3679,12 +3680,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct trace_buffer *buffer; struct trace_buffer *buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ftrace_entry *entry; struct ftrace_entry *entry;
unsigned long flags; unsigned int trace_ctx;
long disabled; long disabled;
int cpu; int cpu;
int pc;
pc = preempt_count(); trace_ctx = tracing_gen_ctx();
preempt_disable_notrace(); preempt_disable_notrace();
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
...@@ -3692,11 +3692,9 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip, ...@@ -3692,11 +3692,9 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
if (disabled != 1) if (disabled != 1)
goto out; goto out;
local_save_flags(flags);
event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
TRACE_FN, sizeof(*entry), TRACE_FN, sizeof(*entry),
flags, pc); trace_ctx);
if (!event) if (!event)
goto out; goto out;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
...@@ -3704,7 +3702,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip, ...@@ -3704,7 +3702,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->parent_ip = parent_ip; entry->parent_ip = parent_ip;
event_trigger_unlock_commit(&event_trace_file, buffer, event, event_trigger_unlock_commit(&event_trace_file, buffer, event,
entry, flags, pc); entry, trace_ctx);
out: out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace(); preempt_enable_notrace();
......
...@@ -192,7 +192,6 @@ static void *trace_alloc_entry(struct trace_event_call *call, int *size) ...@@ -192,7 +192,6 @@ static void *trace_alloc_entry(struct trace_event_call *call, int *size)
static int parse_entry(char *str, struct trace_event_call *call, void **pentry) static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
{ {
struct ftrace_event_field *field; struct ftrace_event_field *field;
unsigned long irq_flags;
void *entry = NULL; void *entry = NULL;
int entry_size; int entry_size;
u64 val = 0; u64 val = 0;
...@@ -203,9 +202,8 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry) ...@@ -203,9 +202,8 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
if (!entry) if (!entry)
return -ENOMEM; return -ENOMEM;
local_save_flags(irq_flags); tracing_generic_entry_update(entry, call->event.type,
tracing_generic_entry_update(entry, call->event.type, irq_flags, tracing_gen_ctx());
preempt_count());
while ((len = parse_field(str, call, &field, &val)) > 0) { while ((len = parse_field(str, call, &field, &val)) > 0) {
if (is_function_field(field)) if (is_function_field(field))
......
...@@ -106,8 +106,7 @@ static int function_trace_init(struct trace_array *tr) ...@@ -106,8 +106,7 @@ static int function_trace_init(struct trace_array *tr)
ftrace_init_array_ops(tr, func); ftrace_init_array_ops(tr, func);
tr->array_buffer.cpu = get_cpu(); tr->array_buffer.cpu = raw_smp_processor_id();
put_cpu();
tracing_start_cmdline_record(); tracing_start_cmdline_record();
tracing_start_function_trace(tr); tracing_start_function_trace(tr);
...@@ -132,10 +131,9 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -132,10 +131,9 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
{ {
struct trace_array *tr = op->private; struct trace_array *tr = op->private;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned int trace_ctx;
int bit; int bit;
int cpu; int cpu;
int pc;
if (unlikely(!tr->function_enabled)) if (unlikely(!tr->function_enabled))
return; return;
...@@ -144,15 +142,14 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -144,15 +142,14 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
if (bit < 0) if (bit < 0)
return; return;
pc = preempt_count(); trace_ctx = tracing_gen_ctx();
preempt_disable_notrace(); preempt_disable_notrace();
cpu = smp_processor_id(); cpu = smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu); data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (!atomic_read(&data->disabled)) { if (!atomic_read(&data->disabled))
local_save_flags(flags); trace_function(tr, ip, parent_ip, trace_ctx);
trace_function(tr, ip, parent_ip, flags, pc);
}
ftrace_test_recursion_unlock(bit); ftrace_test_recursion_unlock(bit);
preempt_enable_notrace(); preempt_enable_notrace();
} }
...@@ -184,7 +181,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -184,7 +181,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
unsigned long flags; unsigned long flags;
long disabled; long disabled;
int cpu; int cpu;
int pc; unsigned int trace_ctx;
if (unlikely(!tr->function_enabled)) if (unlikely(!tr->function_enabled))
return; return;
...@@ -199,9 +196,9 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -199,9 +196,9 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1)) {
pc = preempt_count(); trace_ctx = tracing_gen_ctx_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc); trace_function(tr, ip, parent_ip, trace_ctx);
__trace_stack(tr, flags, STACK_SKIP, pc); __trace_stack(tr, trace_ctx, STACK_SKIP);
} }
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
...@@ -404,13 +401,11 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, ...@@ -404,13 +401,11 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
static __always_inline void trace_stack(struct trace_array *tr) static __always_inline void trace_stack(struct trace_array *tr)
{ {
unsigned long flags; unsigned int trace_ctx;
int pc;
local_save_flags(flags); trace_ctx = tracing_gen_ctx();
pc = preempt_count();
__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc); __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
} }
static void static void
......
...@@ -96,8 +96,7 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration, ...@@ -96,8 +96,7 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
int __trace_graph_entry(struct trace_array *tr, int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace, struct ftrace_graph_ent *trace,
unsigned long flags, unsigned int trace_ctx)
int pc)
{ {
struct trace_event_call *call = &event_funcgraph_entry; struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event; struct ring_buffer_event *event;
...@@ -105,7 +104,7 @@ int __trace_graph_entry(struct trace_array *tr, ...@@ -105,7 +104,7 @@ int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent_entry *entry; struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc); sizeof(*entry), trace_ctx);
if (!event) if (!event)
return 0; return 0;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
...@@ -129,10 +128,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -129,10 +128,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = graph_array; struct trace_array *tr = graph_array;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
unsigned int trace_ctx;
long disabled; long disabled;
int ret; int ret;
int cpu; int cpu;
int pc;
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0; return 0;
...@@ -174,8 +173,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -174,8 +173,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
data = per_cpu_ptr(tr->array_buffer.data, cpu); data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1)) {
pc = preempt_count(); trace_ctx = tracing_gen_ctx_flags(flags);
ret = __trace_graph_entry(tr, trace, flags, pc); ret = __trace_graph_entry(tr, trace, trace_ctx);
} else { } else {
ret = 0; ret = 0;
} }
...@@ -188,7 +187,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -188,7 +187,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
static void static void
__trace_graph_function(struct trace_array *tr, __trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long flags, int pc) unsigned long ip, unsigned int trace_ctx)
{ {
u64 time = trace_clock_local(); u64 time = trace_clock_local();
struct ftrace_graph_ent ent = { struct ftrace_graph_ent ent = {
...@@ -202,22 +201,21 @@ __trace_graph_function(struct trace_array *tr, ...@@ -202,22 +201,21 @@ __trace_graph_function(struct trace_array *tr,
.rettime = time, .rettime = time,
}; };
__trace_graph_entry(tr, &ent, flags, pc); __trace_graph_entry(tr, &ent, trace_ctx);
__trace_graph_return(tr, &ret, flags, pc); __trace_graph_return(tr, &ret, trace_ctx);
} }
void void
trace_graph_function(struct trace_array *tr, trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc) unsigned int trace_ctx)
{ {
__trace_graph_function(tr, ip, flags, pc); __trace_graph_function(tr, ip, trace_ctx);
} }
void __trace_graph_return(struct trace_array *tr, void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace, struct ftrace_graph_ret *trace,
unsigned long flags, unsigned int trace_ctx)
int pc)
{ {
struct trace_event_call *call = &event_funcgraph_exit; struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event; struct ring_buffer_event *event;
...@@ -225,7 +223,7 @@ void __trace_graph_return(struct trace_array *tr, ...@@ -225,7 +223,7 @@ void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret_entry *entry; struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
sizeof(*entry), flags, pc); sizeof(*entry), trace_ctx);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
...@@ -239,9 +237,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace) ...@@ -239,9 +237,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = graph_array; struct trace_array *tr = graph_array;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
unsigned int trace_ctx;
long disabled; long disabled;
int cpu; int cpu;
int pc;
ftrace_graph_addr_finish(trace); ftrace_graph_addr_finish(trace);
...@@ -255,8 +253,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace) ...@@ -255,8 +253,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
data = per_cpu_ptr(tr->array_buffer.data, cpu); data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1)) {
pc = preempt_count(); trace_ctx = tracing_gen_ctx_flags(flags);
__trace_graph_return(tr, trace, flags, pc); __trace_graph_return(tr, trace, trace_ctx);
} }
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
local_irq_restore(flags); local_irq_restore(flags);
......
...@@ -108,14 +108,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample) ...@@ -108,14 +108,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
struct trace_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct hwlat_entry *entry; struct hwlat_entry *entry;
unsigned long flags;
int pc;
pc = preempt_count();
local_save_flags(flags);
event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry), event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
flags, pc); tracing_gen_ctx());
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
......
...@@ -143,11 +143,14 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip, ...@@ -143,11 +143,14 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
unsigned int trace_ctx;
if (!func_prolog_dec(tr, &data, &flags)) if (!func_prolog_dec(tr, &data, &flags))
return; return;
trace_function(tr, ip, parent_ip, flags, preempt_count()); trace_ctx = tracing_gen_ctx_flags(flags);
trace_function(tr, ip, parent_ip, trace_ctx);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
} }
...@@ -177,8 +180,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) ...@@ -177,8 +180,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
unsigned int trace_ctx;
int ret; int ret;
int pc;
if (ftrace_graph_ignore_func(trace)) if (ftrace_graph_ignore_func(trace))
return 0; return 0;
...@@ -195,8 +198,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) ...@@ -195,8 +198,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
if (!func_prolog_dec(tr, &data, &flags)) if (!func_prolog_dec(tr, &data, &flags))
return 0; return 0;
pc = preempt_count(); trace_ctx = tracing_gen_ctx_flags(flags);
ret = __trace_graph_entry(tr, trace, flags, pc); ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
return ret; return ret;
...@@ -207,15 +210,15 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace) ...@@ -207,15 +210,15 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
int pc; unsigned int trace_ctx;
ftrace_graph_addr_finish(trace); ftrace_graph_addr_finish(trace);
if (!func_prolog_dec(tr, &data, &flags)) if (!func_prolog_dec(tr, &data, &flags))
return; return;
pc = preempt_count(); trace_ctx = tracing_gen_ctx_flags(flags);
__trace_graph_return(tr, trace, flags, pc); __trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
} }
...@@ -267,12 +270,12 @@ static void irqsoff_print_header(struct seq_file *s) ...@@ -267,12 +270,12 @@ static void irqsoff_print_header(struct seq_file *s)
static void static void
__trace_function(struct trace_array *tr, __trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc) unsigned int trace_ctx)
{ {
if (is_graph(tr)) if (is_graph(tr))
trace_graph_function(tr, ip, parent_ip, flags, pc); trace_graph_function(tr, ip, parent_ip, trace_ctx);
else else
trace_function(tr, ip, parent_ip, flags, pc); trace_function(tr, ip, parent_ip, trace_ctx);
} }
#else #else
...@@ -322,15 +325,13 @@ check_critical_timing(struct trace_array *tr, ...@@ -322,15 +325,13 @@ check_critical_timing(struct trace_array *tr,
{ {
u64 T0, T1, delta; u64 T0, T1, delta;
unsigned long flags; unsigned long flags;
int pc; unsigned int trace_ctx;
T0 = data->preempt_timestamp; T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu); T1 = ftrace_now(cpu);
delta = T1-T0; delta = T1-T0;
local_save_flags(flags); trace_ctx = tracing_gen_ctx();
pc = preempt_count();
if (!report_latency(tr, delta)) if (!report_latency(tr, delta))
goto out; goto out;
...@@ -341,9 +342,9 @@ check_critical_timing(struct trace_array *tr, ...@@ -341,9 +342,9 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(tr, delta)) if (!report_latency(tr, delta))
goto out_unlock; goto out_unlock;
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
/* Skip 5 functions to get to the irq/preempt enable function */ /* Skip 5 functions to get to the irq/preempt enable function */
__trace_stack(tr, flags, 5, pc); __trace_stack(tr, trace_ctx, 5);
if (data->critical_sequence != max_sequence) if (data->critical_sequence != max_sequence)
goto out_unlock; goto out_unlock;
...@@ -363,16 +364,15 @@ check_critical_timing(struct trace_array *tr, ...@@ -363,16 +364,15 @@ check_critical_timing(struct trace_array *tr,
out: out:
data->critical_sequence = max_sequence; data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu); data->preempt_timestamp = ftrace_now(cpu);
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
} }
static nokprobe_inline void static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) start_critical_timing(unsigned long ip, unsigned long parent_ip)
{ {
int cpu; int cpu;
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags;
if (!tracer_enabled || !tracing_is_enabled()) if (!tracer_enabled || !tracing_is_enabled())
return; return;
...@@ -393,9 +393,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) ...@@ -393,9 +393,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
data->preempt_timestamp = ftrace_now(cpu); data->preempt_timestamp = ftrace_now(cpu);
data->critical_start = parent_ip ? : ip; data->critical_start = parent_ip ? : ip;
local_save_flags(flags); __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
__trace_function(tr, ip, parent_ip, flags, pc);
per_cpu(tracing_cpu, cpu) = 1; per_cpu(tracing_cpu, cpu) = 1;
...@@ -403,12 +401,12 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) ...@@ -403,12 +401,12 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
} }
static nokprobe_inline void static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{ {
int cpu; int cpu;
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned int trace_ctx;
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
/* Always clear the tracing cpu on stopping the trace */ /* Always clear the tracing cpu on stopping the trace */
...@@ -428,8 +426,8 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) ...@@ -428,8 +426,8 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
atomic_inc(&data->disabled); atomic_inc(&data->disabled);
local_save_flags(flags); trace_ctx = tracing_gen_ctx();
__trace_function(tr, ip, parent_ip, flags, pc); __trace_function(tr, ip, parent_ip, trace_ctx);
check_critical_timing(tr, data, parent_ip ? : ip, cpu); check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0; data->critical_start = 0;
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
...@@ -438,20 +436,16 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) ...@@ -438,20 +436,16 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
/* start and stop critical timings used to for stoppage (in idle) */ /* start and stop critical timings used to for stoppage (in idle) */
void start_critical_timings(void) void start_critical_timings(void)
{ {
int pc = preempt_count(); if (preempt_trace(preempt_count()) || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
if (preempt_trace(pc) || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
} }
EXPORT_SYMBOL_GPL(start_critical_timings); EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings); NOKPROBE_SYMBOL(start_critical_timings);
void stop_critical_timings(void) void stop_critical_timings(void)
{ {
int pc = preempt_count(); if (preempt_trace(preempt_count()) || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
if (preempt_trace(pc) || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
} }
EXPORT_SYMBOL_GPL(stop_critical_timings); EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings); NOKPROBE_SYMBOL(stop_critical_timings);
...@@ -613,19 +607,15 @@ static void irqsoff_tracer_stop(struct trace_array *tr) ...@@ -613,19 +607,15 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
*/ */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1) void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{ {
unsigned int pc = preempt_count(); if (!preempt_trace(preempt_count()) && irq_trace())
stop_critical_timing(a0, a1);
if (!preempt_trace(pc) && irq_trace())
stop_critical_timing(a0, a1, pc);
} }
NOKPROBE_SYMBOL(tracer_hardirqs_on); NOKPROBE_SYMBOL(tracer_hardirqs_on);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1) void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{ {
unsigned int pc = preempt_count(); if (!preempt_trace(preempt_count()) && irq_trace())
start_critical_timing(a0, a1);
if (!preempt_trace(pc) && irq_trace())
start_critical_timing(a0, a1, pc);
} }
NOKPROBE_SYMBOL(tracer_hardirqs_off); NOKPROBE_SYMBOL(tracer_hardirqs_off);
...@@ -665,18 +655,14 @@ static struct tracer irqsoff_tracer __read_mostly = ...@@ -665,18 +655,14 @@ static struct tracer irqsoff_tracer __read_mostly =
#ifdef CONFIG_PREEMPT_TRACER #ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1) void tracer_preempt_on(unsigned long a0, unsigned long a1)
{ {
int pc = preempt_count(); if (preempt_trace(preempt_count()) && !irq_trace())
stop_critical_timing(a0, a1);
if (preempt_trace(pc) && !irq_trace())
stop_critical_timing(a0, a1, pc);
} }
void tracer_preempt_off(unsigned long a0, unsigned long a1) void tracer_preempt_off(unsigned long a0, unsigned long a1)
{ {
int pc = preempt_count(); if (preempt_trace(preempt_count()) && !irq_trace())
start_critical_timing(a0, a1);
if (preempt_trace(pc) && !irq_trace())
start_critical_timing(a0, a1, pc);
} }
static int preemptoff_tracer_init(struct trace_array *tr) static int preemptoff_tracer_init(struct trace_array *tr)
......
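Two helpers alternate in this file: where a function has already captured flags via local_irq_save(), they are passed through tracing_gen_ctx_flags(flags) so the packed word reflects the state at capture time, while tracing_gen_ctx() is the convenience form that samples the live state itself. Continuing the earlier toy model (illustrative layout, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the CPU's live state. */
static bool irqs_off_now = true;
static unsigned int preempt_depth = 1;

/* Caller supplies previously saved state (cf. tracing_gen_ctx_flags()). */
static unsigned int gen_ctx_flags(bool irqs_off)
{
	return ((irqs_off ? 0x1u : 0u) << 16) | (preempt_depth & 0xff);
}

/* No saved state: sample it here and delegate (cf. tracing_gen_ctx()). */
static unsigned int gen_ctx(void)
{
	return gen_ctx_flags(irqs_off_now);
}

int main(void)
{
	printf("sampled: %#x\n", gen_ctx());
	printf("saved:   %#x\n", gen_ctx_flags(false));
	return 0;
}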
...@@ -35,7 +35,7 @@ static int __init set_kprobe_boot_events(char *str) ...@@ -35,7 +35,7 @@ static int __init set_kprobe_boot_events(char *str)
} }
__setup("kprobe_event=", set_kprobe_boot_events); __setup("kprobe_event=", set_kprobe_boot_events);
static int trace_kprobe_create(int argc, const char **argv); static int trace_kprobe_create(const char *raw_command);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev); static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev); static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev); static bool trace_kprobe_is_busy(struct dyn_event *ev);
...@@ -711,7 +711,7 @@ static inline void sanitize_event_name(char *name) ...@@ -711,7 +711,7 @@ static inline void sanitize_event_name(char *name)
*name = '_'; *name = '_';
} }
static int trace_kprobe_create(int argc, const char *argv[]) static int __trace_kprobe_create(int argc, const char *argv[])
{ {
/* /*
* Argument syntax: * Argument syntax:
...@@ -910,20 +910,25 @@ static int trace_kprobe_create(int argc, const char *argv[]) ...@@ -910,20 +910,25 @@ static int trace_kprobe_create(int argc, const char *argv[])
goto out; goto out;
} }
static int create_or_delete_trace_kprobe(int argc, char **argv) static int trace_kprobe_create(const char *raw_command)
{
return trace_probe_create(raw_command, __trace_kprobe_create);
}
static int create_or_delete_trace_kprobe(const char *raw_command)
{ {
int ret; int ret;
if (argv[0][0] == '-') if (raw_command[0] == '-')
return dyn_event_release(argc, argv, &trace_kprobe_ops); return dyn_event_release(raw_command, &trace_kprobe_ops);
ret = trace_kprobe_create(argc, (const char **)argv); ret = trace_kprobe_create(raw_command);
return ret == -ECANCELED ? -EINVAL : ret; return ret == -ECANCELED ? -EINVAL : ret;
} }
static int trace_kprobe_run_command(struct dynevent_cmd *cmd) static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{ {
return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe); return create_or_delete_trace_kprobe(cmd->seq.buffer);
} }
/** /**
...@@ -1084,7 +1089,7 @@ int kprobe_event_delete(const char *name) ...@@ -1084,7 +1089,7 @@ int kprobe_event_delete(const char *name)
snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name); snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
return trace_run_command(buf, create_or_delete_trace_kprobe); return create_or_delete_trace_kprobe(buf);
} }
EXPORT_SYMBOL_GPL(kprobe_event_delete); EXPORT_SYMBOL_GPL(kprobe_event_delete);
...@@ -1386,8 +1391,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, ...@@ -1386,8 +1391,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
if (trace_trigger_soft_disabled(trace_file)) if (trace_trigger_soft_disabled(trace_file))
return; return;
local_save_flags(fbuffer.flags); fbuffer.trace_ctx = tracing_gen_ctx();
fbuffer.pc = preempt_count();
fbuffer.trace_file = trace_file; fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs); dsize = __get_data_size(&tk->tp, regs);
...@@ -1396,7 +1400,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, ...@@ -1396,7 +1400,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file, trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type, call->event.type,
sizeof(*entry) + tk->tp.size + dsize, sizeof(*entry) + tk->tp.size + dsize,
fbuffer.flags, fbuffer.pc); fbuffer.trace_ctx);
if (!fbuffer.event) if (!fbuffer.event)
return; return;
...@@ -1434,8 +1438,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, ...@@ -1434,8 +1438,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
if (trace_trigger_soft_disabled(trace_file)) if (trace_trigger_soft_disabled(trace_file))
return; return;
local_save_flags(fbuffer.flags); fbuffer.trace_ctx = tracing_gen_ctx();
fbuffer.pc = preempt_count();
fbuffer.trace_file = trace_file; fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs); dsize = __get_data_size(&tk->tp, regs);
...@@ -1443,7 +1446,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, ...@@ -1443,7 +1446,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file, trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type, call->event.type,
sizeof(*entry) + tk->tp.size + dsize, sizeof(*entry) + tk->tp.size + dsize,
fbuffer.flags, fbuffer.pc); fbuffer.trace_ctx);
if (!fbuffer.event) if (!fbuffer.event)
return; return;
...@@ -1888,7 +1891,7 @@ static __init void setup_boot_kprobe_events(void) ...@@ -1888,7 +1891,7 @@ static __init void setup_boot_kprobe_events(void)
if (p) if (p)
*p++ = '\0'; *p++ = '\0';
ret = trace_run_command(cmd, create_or_delete_trace_kprobe); ret = create_or_delete_trace_kprobe(cmd);
if (ret) if (ret)
pr_warn("Failed to add event(%d): %s\n", ret, cmd); pr_warn("Failed to add event(%d): %s\n", ret, cmd);
...@@ -1982,8 +1985,7 @@ static __init int kprobe_trace_self_tests_init(void) ...@@ -1982,8 +1985,7 @@ static __init int kprobe_trace_self_tests_init(void)
pr_info("Testing kprobe tracing: "); pr_info("Testing kprobe tracing: ");
ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)", ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
create_or_delete_trace_kprobe);
if (WARN_ON_ONCE(ret)) { if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function entry.\n"); pr_warn("error on probing function entry.\n");
warn++; warn++;
...@@ -2004,8 +2006,7 @@ static __init int kprobe_trace_self_tests_init(void) ...@@ -2004,8 +2006,7 @@ static __init int kprobe_trace_self_tests_init(void)
} }
} }
ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval", ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
create_or_delete_trace_kprobe);
if (WARN_ON_ONCE(ret)) { if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function return.\n"); pr_warn("error on probing function return.\n");
warn++; warn++;
...@@ -2078,13 +2079,13 @@ static __init int kprobe_trace_self_tests_init(void) ...@@ -2078,13 +2079,13 @@ static __init int kprobe_trace_self_tests_init(void)
trace_probe_event_call(&tk->tp), file); trace_probe_event_call(&tk->tp), file);
} }
ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe); ret = create_or_delete_trace_kprobe("-:testprobe");
if (WARN_ON_ONCE(ret)) { if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n"); pr_warn("error on deleting a probe.\n");
warn++; warn++;
} }
ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe); ret = create_or_delete_trace_kprobe("-:testprobe2");
if (WARN_ON_ONCE(ret)) { if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n"); pr_warn("error on deleting a probe.\n");
warn++; warn++;
......
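After this conversion every path into kprobe event creation, including the boot-time commands and the selftests just above, hands a single raw string to create_or_delete_trace_kprobe(), which looks only at a leading '-' to choose deletion. A toy model of that dispatch, with stand-in functions rather than the kernel's:

#include <stdio.h>

/* Stand-in for trace_kprobe_create(): just echo the command. */
static int kprobe_create(const char *cmd)
{
	printf("create: %s\n", cmd);
	return 0;
}

/* Stand-in for dyn_event_release(): strip the "-:" prefix. */
static int kprobe_release(const char *cmd)
{
	printf("release: %s\n", cmd + 2);
	return 0;
}

/* Mirrors create_or_delete_trace_kprobe(): '-' selects deletion. */
static int create_or_delete(const char *raw_command)
{
	if (raw_command[0] == '-')
		return kprobe_release(raw_command);
	return kprobe_create(raw_command);
}

int main(void)
{
	create_or_delete("p:testprobe kprobe_trace_selftest_target $stack");
	create_or_delete("-:testprobe");
	return 0;
}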
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
* Copyright (C) 2008 Pekka Paalanen <pq@iki.fi> * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
*/ */
#define DEBUG 1
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mmiotrace.h> #include <linux/mmiotrace.h>
#include <linux/pci.h> #include <linux/pci.h>
...@@ -300,10 +298,11 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, ...@@ -300,10 +298,11 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry; struct trace_mmiotrace_rw *entry;
int pc = preempt_count(); unsigned int trace_ctx;
trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
sizeof(*entry), 0, pc); sizeof(*entry), trace_ctx);
if (!event) { if (!event) {
atomic_inc(&dropped_count); atomic_inc(&dropped_count);
return; return;
...@@ -312,7 +311,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, ...@@ -312,7 +311,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
entry->rw = *rw; entry->rw = *rw;
if (!call_filter_check_discard(call, entry, buffer, event)) if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(tr, buffer, event, 0, pc); trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
} }
void mmio_trace_rw(struct mmiotrace_rw *rw) void mmio_trace_rw(struct mmiotrace_rw *rw)
...@@ -330,10 +329,11 @@ static void __trace_mmiotrace_map(struct trace_array *tr, ...@@ -330,10 +329,11 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry; struct trace_mmiotrace_map *entry;
int pc = preempt_count(); unsigned int trace_ctx;
trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
sizeof(*entry), 0, pc); sizeof(*entry), trace_ctx);
if (!event) { if (!event) {
atomic_inc(&dropped_count); atomic_inc(&dropped_count);
return; return;
...@@ -342,7 +342,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, ...@@ -342,7 +342,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
entry->map = *map; entry->map = *map;
if (!call_filter_check_discard(call, entry, buffer, event)) if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(tr, buffer, event, 0, pc); trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
} }
void mmio_trace_mapping(struct mmiotrace_map *map) void mmio_trace_mapping(struct mmiotrace_map *map)
......
...@@ -312,13 +312,23 @@ int trace_raw_output_prep(struct trace_iterator *iter, ...@@ -312,13 +312,23 @@ int trace_raw_output_prep(struct trace_iterator *iter,
} }
EXPORT_SYMBOL(trace_raw_output_prep); EXPORT_SYMBOL(trace_raw_output_prep);
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
trace_seq_vprintf(&iter->seq, trace_event_format(iter, fmt), ap);
va_end(ap);
}
EXPORT_SYMBOL(trace_event_printf);
static int trace_output_raw(struct trace_iterator *iter, char *name, static int trace_output_raw(struct trace_iterator *iter, char *name,
char *fmt, va_list ap) char *fmt, va_list ap)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
trace_seq_printf(s, "%s: ", name); trace_seq_printf(s, "%s: ", name);
trace_seq_vprintf(s, fmt, ap); trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
return trace_handle_return(s); return trace_handle_return(s);
} }
......
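trace_event_printf() is the new choke point: every event format now passes through trace_event_format() before reaching the seq buffer, and that is where the hash-ptr option from the merge log takes effect. With the option cleared, the format can be rewritten so that "%p" prints the real address (the kernel's vsnprintf() understands "%px" for that; glibc's does not). A simplified userspace sketch, assuming a plain "%p" to "%px" substitution:

#include <stdbool.h>
#include <stdio.h>

static bool hash_ptr = true;	/* mirrors the hash-ptr trace option */

/*
 * Rewrite "%p" to "%px" when hashing is off (simplified; the real
 * trace_event_format() works on a per-iterator buffer).
 */
static const char *event_format(const char *fmt, char *buf, size_t len)
{
	size_t i = 0;

	if (hash_ptr)
		return fmt;	/* leave "%p" alone: it stays hashed */

	for (; *fmt && i + 3 < len; fmt++) {
		buf[i++] = *fmt;
		if (fmt[0] == '%' && fmt[1] == 'p') {
			buf[i++] = 'p';
			buf[i++] = 'x';
			fmt++;
		}
	}
	buf[i] = '\0';
	return buf;
}

int main(void)
{
	char buf[64];

	hash_ptr = false;
	puts(event_format("rw: addr=%p val=%p", buf, sizeof(buf)));
	return 0;
}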
...@@ -1134,3 +1134,20 @@ bool trace_probe_match_command_args(struct trace_probe *tp, ...@@ -1134,3 +1134,20 @@ bool trace_probe_match_command_args(struct trace_probe *tp,
} }
return true; return true;
} }
int trace_probe_create(const char *raw_command, int (*createfn)(int, const char **))
{
int argc = 0, ret = 0;
char **argv;
argv = argv_split(GFP_KERNEL, raw_command, &argc);
if (!argv)
return -ENOMEM;
if (argc)
ret = createfn(argc, (const char **)argv);
argv_free(argv);
return ret;
}
...@@ -341,6 +341,7 @@ struct event_file_link *trace_probe_get_file_link(struct trace_probe *tp, ...@@ -341,6 +341,7 @@ struct event_file_link *trace_probe_get_file_link(struct trace_probe *tp,
int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b); int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b);
bool trace_probe_match_command_args(struct trace_probe *tp, bool trace_probe_match_command_args(struct trace_probe *tp,
int argc, const char **argv); int argc, const char **argv);
int trace_probe_create(const char *raw_command, int (*createfn)(int, const char **));
#define trace_probe_for_each_link(pos, tp) \ #define trace_probe_for_each_link(pos, tp) \
list_for_each_entry(pos, &(tp)->event->files, list) list_for_each_entry(pos, &(tp)->event->files, list)
......
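trace_probe_create() above is the whole backward-compatibility shim: split the raw command into an argv vector, pass it to the existing argc/argv parser, free the vector. A userspace equivalent of that split-and-delegate step, with strtok_r() standing in for the kernel's argv_split() and an illustrative probe command:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split-and-delegate, modeled on trace_probe_create(). */
static int probe_create(const char *raw_command,
			int (*createfn)(int, const char **))
{
	char *dup, *tok, *save = NULL;
	const char *argv[32];
	int argc = 0, ret = 0;

	dup = strdup(raw_command);	/* argv_split() also copies */
	if (!dup)
		return -1;

	for (tok = strtok_r(dup, " \t", &save);
	     tok && argc < 32;
	     tok = strtok_r(NULL, " \t", &save))
		argv[argc++] = tok;

	if (argc)
		ret = createfn(argc, argv);

	free(dup);
	return ret;
}

static int show_args(int argc, const char **argv)
{
	for (int i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return 0;
}

int main(void)
{
	return probe_create("p:testprobe kprobe_trace_selftest_target $stack",
			    show_args);
}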
...@@ -67,7 +67,7 @@ static bool function_enabled; ...@@ -67,7 +67,7 @@ static bool function_enabled;
static int static int
func_prolog_preempt_disable(struct trace_array *tr, func_prolog_preempt_disable(struct trace_array *tr,
struct trace_array_cpu **data, struct trace_array_cpu **data,
int *pc) unsigned int *trace_ctx)
{ {
long disabled; long disabled;
int cpu; int cpu;
...@@ -75,7 +75,7 @@ func_prolog_preempt_disable(struct trace_array *tr, ...@@ -75,7 +75,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (likely(!wakeup_task)) if (likely(!wakeup_task))
return 0; return 0;
*pc = preempt_count(); *trace_ctx = tracing_gen_ctx();
preempt_disable_notrace(); preempt_disable_notrace();
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
...@@ -116,8 +116,8 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace) ...@@ -116,8 +116,8 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{ {
struct trace_array *tr = wakeup_trace; struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned int trace_ctx;
int pc, ret = 0; int ret = 0;
if (ftrace_graph_ignore_func(trace)) if (ftrace_graph_ignore_func(trace))
return 0; return 0;
...@@ -131,11 +131,10 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace) ...@@ -131,11 +131,10 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
if (ftrace_graph_notrace_addr(trace->func)) if (ftrace_graph_notrace_addr(trace->func))
return 1; return 1;
if (!func_prolog_preempt_disable(tr, &data, &pc)) if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0; return 0;
local_save_flags(flags); ret = __trace_graph_entry(tr, trace, trace_ctx);
ret = __trace_graph_entry(tr, trace, flags, pc);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
preempt_enable_notrace(); preempt_enable_notrace();
...@@ -146,16 +145,14 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace) ...@@ -146,16 +145,14 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{ {
struct trace_array *tr = wakeup_trace; struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned int trace_ctx;
int pc;
ftrace_graph_addr_finish(trace); ftrace_graph_addr_finish(trace);
if (!func_prolog_preempt_disable(tr, &data, &pc)) if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return; return;
local_save_flags(flags); __trace_graph_return(tr, trace, trace_ctx);
__trace_graph_return(tr, trace, flags, pc);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
preempt_enable_notrace(); preempt_enable_notrace();
...@@ -217,13 +214,13 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, ...@@ -217,13 +214,13 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr = wakeup_trace; struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
int pc; unsigned int trace_ctx;
if (!func_prolog_preempt_disable(tr, &data, &pc)) if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return; return;
local_irq_save(flags); local_irq_save(flags);
trace_function(tr, ip, parent_ip, flags, pc); trace_function(tr, ip, parent_ip, trace_ctx);
local_irq_restore(flags); local_irq_restore(flags);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
...@@ -303,12 +300,12 @@ static void wakeup_print_header(struct seq_file *s) ...@@ -303,12 +300,12 @@ static void wakeup_print_header(struct seq_file *s)
static void static void
__trace_function(struct trace_array *tr, __trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc) unsigned int trace_ctx)
{ {
if (is_graph(tr)) if (is_graph(tr))
trace_graph_function(tr, ip, parent_ip, flags, pc); trace_graph_function(tr, ip, parent_ip, trace_ctx);
else else
trace_function(tr, ip, parent_ip, flags, pc); trace_function(tr, ip, parent_ip, trace_ctx);
} }
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
...@@ -375,7 +372,7 @@ static void ...@@ -375,7 +372,7 @@ static void
tracing_sched_switch_trace(struct trace_array *tr, tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev, struct task_struct *prev,
struct task_struct *next, struct task_struct *next,
unsigned long flags, int pc) unsigned int trace_ctx)
{ {
struct trace_event_call *call = &event_context_switch; struct trace_event_call *call = &event_context_switch;
struct trace_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
...@@ -383,7 +380,7 @@ tracing_sched_switch_trace(struct trace_array *tr, ...@@ -383,7 +380,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
struct ctx_switch_entry *entry; struct ctx_switch_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_CTX, event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
sizeof(*entry), flags, pc); sizeof(*entry), trace_ctx);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
...@@ -396,14 +393,14 @@ tracing_sched_switch_trace(struct trace_array *tr, ...@@ -396,14 +393,14 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(next); entry->next_cpu = task_cpu(next);
if (!call_filter_check_discard(call, entry, buffer, event)) if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(tr, buffer, event, flags, pc); trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
} }
static void static void
tracing_sched_wakeup_trace(struct trace_array *tr, tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee, struct task_struct *wakee,
struct task_struct *curr, struct task_struct *curr,
unsigned long flags, int pc) unsigned int trace_ctx)
{ {
struct trace_event_call *call = &event_wakeup; struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event; struct ring_buffer_event *event;
...@@ -411,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, ...@@ -411,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc); sizeof(*entry), trace_ctx);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
...@@ -424,7 +421,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, ...@@ -424,7 +421,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee); entry->next_cpu = task_cpu(wakee);
if (!call_filter_check_discard(call, entry, buffer, event)) if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(tr, buffer, event, flags, pc); trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
} }
static void notrace static void notrace
...@@ -436,7 +433,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, ...@@ -436,7 +433,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
unsigned long flags; unsigned long flags;
long disabled; long disabled;
int cpu; int cpu;
int pc; unsigned int trace_ctx;
tracing_record_cmdline(prev); tracing_record_cmdline(prev);
...@@ -455,8 +452,6 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, ...@@ -455,8 +452,6 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (next != wakeup_task) if (next != wakeup_task)
return; return;
pc = preempt_count();
/* disable local data, not wakeup_cpu data */ /* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
...@@ -464,6 +459,8 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, ...@@ -464,6 +459,8 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out; goto out;
local_irq_save(flags); local_irq_save(flags);
trace_ctx = tracing_gen_ctx_flags(flags);
arch_spin_lock(&wakeup_lock); arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */ /* We could race with grabbing wakeup_lock */
...@@ -473,9 +470,9 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, ...@@ -473,9 +470,9 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
/* The task we are waiting for is waking up */ /* The task we are waiting for is waking up */
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
__trace_stack(wakeup_trace, flags, 0, pc); __trace_stack(wakeup_trace, trace_ctx, 0);
T0 = data->preempt_timestamp; T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu); T1 = ftrace_now(cpu);
...@@ -527,9 +524,8 @@ probe_wakeup(void *ignore, struct task_struct *p) ...@@ -527,9 +524,8 @@ probe_wakeup(void *ignore, struct task_struct *p)
{ {
struct trace_array_cpu *data; struct trace_array_cpu *data;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
unsigned long flags;
long disabled; long disabled;
int pc; unsigned int trace_ctx;
if (likely(!tracer_enabled)) if (likely(!tracer_enabled))
return; return;
...@@ -550,11 +546,12 @@ probe_wakeup(void *ignore, struct task_struct *p) ...@@ -550,11 +546,12 @@ probe_wakeup(void *ignore, struct task_struct *p)
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio))) (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return; return;
pc = preempt_count();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1)) if (unlikely(disabled != 1))
goto out; goto out;
trace_ctx = tracing_gen_ctx();
/* interrupts should be off from try_to_wake_up */ /* interrupts should be off from try_to_wake_up */
arch_spin_lock(&wakeup_lock); arch_spin_lock(&wakeup_lock);
...@@ -581,19 +578,17 @@ probe_wakeup(void *ignore, struct task_struct *p) ...@@ -581,19 +578,17 @@ probe_wakeup(void *ignore, struct task_struct *p)
wakeup_task = get_task_struct(p); wakeup_task = get_task_struct(p);
local_save_flags(flags);
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu); data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
__trace_stack(wakeup_trace, flags, 0, pc); __trace_stack(wakeup_trace, trace_ctx, 0);
/* /*
* We must be careful in using CALLER_ADDR2. But since wake_up * We must be careful in using CALLER_ADDR2. But since wake_up
* is not called by an assembly function (where as schedule is) * is not called by an assembly function (where as schedule is)
* it should be safe to use it here. * it should be safe to use it here.
*/ */
__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
out_locked: out_locked:
arch_spin_unlock(&wakeup_lock); arch_spin_unlock(&wakeup_lock);
......
...@@ -298,9 +298,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) ...@@ -298,9 +298,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_buffer *buffer; struct trace_buffer *buffer;
unsigned long irq_flags; unsigned int trace_ctx;
unsigned long args[6]; unsigned long args[6];
int pc;
int syscall_nr; int syscall_nr;
int size; int size;
...@@ -322,12 +321,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) ...@@ -322,12 +321,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
local_save_flags(irq_flags); trace_ctx = tracing_gen_ctx();
pc = preempt_count();
buffer = tr->array_buffer.buffer; buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, event = trace_buffer_lock_reserve(buffer,
sys_data->enter_event->event.type, size, irq_flags, pc); sys_data->enter_event->event.type, size, trace_ctx);
if (!event) if (!event)
return; return;
...@@ -337,7 +335,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) ...@@ -337,7 +335,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args); memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
event_trigger_unlock_commit(trace_file, buffer, event, entry, event_trigger_unlock_commit(trace_file, buffer, event, entry,
irq_flags, pc); trace_ctx);
} }
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
...@@ -348,8 +346,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) ...@@ -348,8 +346,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_buffer *buffer; struct trace_buffer *buffer;
unsigned long irq_flags; unsigned int trace_ctx;
int pc;
int syscall_nr; int syscall_nr;
syscall_nr = trace_get_syscall_nr(current, regs); syscall_nr = trace_get_syscall_nr(current, regs);
...@@ -368,13 +365,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) ...@@ -368,13 +365,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
if (!sys_data) if (!sys_data)
return; return;
local_save_flags(irq_flags); trace_ctx = tracing_gen_ctx();
pc = preempt_count();
buffer = tr->array_buffer.buffer; buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, event = trace_buffer_lock_reserve(buffer,
sys_data->exit_event->event.type, sizeof(*entry), sys_data->exit_event->event.type, sizeof(*entry),
irq_flags, pc); trace_ctx);
if (!event) if (!event)
return; return;
...@@ -383,7 +379,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) ...@@ -383,7 +379,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->ret = syscall_get_return_value(current, regs); entry->ret = syscall_get_return_value(current, regs);
event_trigger_unlock_commit(trace_file, buffer, event, entry, event_trigger_unlock_commit(trace_file, buffer, event, entry,
irq_flags, pc); trace_ctx);
} }
static int reg_event_syscall_enter(struct trace_event_file *file, static int reg_event_syscall_enter(struct trace_event_file *file,
......
...@@ -34,7 +34,7 @@ struct uprobe_trace_entry_head { ...@@ -34,7 +34,7 @@ struct uprobe_trace_entry_head {
#define DATAOF_TRACE_ENTRY(entry, is_return) \ #define DATAOF_TRACE_ENTRY(entry, is_return) \
((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return)) ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
static int trace_uprobe_create(int argc, const char **argv); static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev); static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev); static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev); static bool trace_uprobe_is_busy(struct dyn_event *ev);
...@@ -530,7 +530,7 @@ static int register_trace_uprobe(struct trace_uprobe *tu) ...@@ -530,7 +530,7 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
* Argument syntax: * Argument syntax:
* - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS] * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
*/ */
static int trace_uprobe_create(int argc, const char **argv) static int __trace_uprobe_create(int argc, const char **argv)
{ {
struct trace_uprobe *tu; struct trace_uprobe *tu;
const char *event = NULL, *group = UPROBE_EVENT_SYSTEM; const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
...@@ -716,14 +716,19 @@ static int trace_uprobe_create(int argc, const char **argv) ...@@ -716,14 +716,19 @@ static int trace_uprobe_create(int argc, const char **argv)
return ret; return ret;
} }
static int create_or_delete_trace_uprobe(int argc, char **argv) int trace_uprobe_create(const char *raw_command)
{
return trace_probe_create(raw_command, __trace_uprobe_create);
}
static int create_or_delete_trace_uprobe(const char *raw_command)
{ {
int ret; int ret;
if (argv[0][0] == '-') if (raw_command[0] == '-')
return dyn_event_release(argc, argv, &trace_uprobe_ops); return dyn_event_release(raw_command, &trace_uprobe_ops);
ret = trace_uprobe_create(argc, (const char **)argv); ret = trace_uprobe_create(raw_command);
return ret == -ECANCELED ? -EINVAL : ret; return ret == -ECANCELED ? -EINVAL : ret;
} }
...@@ -961,7 +966,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, ...@@ -961,7 +966,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
size = esize + tu->tp.size + dsize; size = esize + tu->tp.size + dsize;
event = trace_event_buffer_lock_reserve(&buffer, trace_file, event = trace_event_buffer_lock_reserve(&buffer, trace_file,
call->event.type, size, 0, 0); call->event.type, size, 0);
if (!event) if (!event)
return; return;
...@@ -977,7 +982,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, ...@@ -977,7 +982,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
memcpy(data, ucb->buf, tu->tp.size + dsize); memcpy(data, ucb->buf, tu->tp.size + dsize);
event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0); event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
} }
/* uprobe handler */ /* uprobe handler */
...@@ -1635,7 +1640,7 @@ void destroy_local_trace_uprobe(struct trace_event_call *event_call) ...@@ -1635,7 +1640,7 @@ void destroy_local_trace_uprobe(struct trace_event_call *event_call)
} }
#endif /* CONFIG_PERF_EVENTS */ #endif /* CONFIG_PERF_EVENTS */
/* Make a trace interface for controling probe points */ /* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void) static __init int init_uprobe_trace(void)
{ {
int ret; int ret;
......
@@ -53,6 +53,12 @@ struct tp_probes {
 	struct tracepoint_func probes[];
 };
 
+/* Called in removal of a func but failed to allocate a new tp_funcs */
+static void tp_stub_func(void)
+{
+	return;
+}
+
 static inline void *allocate_probes(int count)
 {
 	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
@@ -130,8 +136,9 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
 	 int prio)
 {
 	struct tracepoint_func *old, *new;
-	int nr_probes = 0;
-	int pos = -1;
+	int iter_probes;	/* Iterate over old probe array. */
+	int nr_probes = 0;	/* Counter for probes */
+	int pos = -1;		/* Insertion position into new array */
 
 	if (WARN_ON(!tp_func->func))
 		return ERR_PTR(-EINVAL);
@@ -140,13 +147,13 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
 	old = *funcs;
 	if (old) {
 		/* (N -> N+1), (N != 0, 1) probes */
-		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
-			/* Insert before probes of lower priority */
-			if (pos < 0 && old[nr_probes].prio < prio)
-				pos = nr_probes;
-			if (old[nr_probes].func == tp_func->func &&
-			    old[nr_probes].data == tp_func->data)
+		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
+			if (old[iter_probes].func == tp_stub_func)
+				continue;	/* Skip stub functions. */
+			if (old[iter_probes].func == tp_func->func &&
+			    old[iter_probes].data == tp_func->data)
 				return ERR_PTR(-EEXIST);
+			nr_probes++;
 		}
 	}
 	/* + 2 : one for new probe, one for NULL func */
@@ -154,20 +161,24 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
 	if (new == NULL)
 		return ERR_PTR(-ENOMEM);
 	if (old) {
-		if (pos < 0) {
-			pos = nr_probes;
-			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
-		} else {
-			/* Copy higher priority probes ahead of the new probe */
-			memcpy(new, old, pos * sizeof(struct tracepoint_func));
-			/* Copy the rest after it. */
-			memcpy(new + pos + 1, old + pos,
-			       (nr_probes - pos) * sizeof(struct tracepoint_func));
+		nr_probes = 0;
+		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
+			if (old[iter_probes].func == tp_stub_func)
+				continue;
+			/* Insert before probes of lower priority */
+			if (pos < 0 && old[iter_probes].prio < prio)
+				pos = nr_probes++;
+			new[nr_probes++] = old[iter_probes];
 		}
-	} else
+		if (pos < 0)
+			pos = nr_probes++;
+		/* nr_probes now points to the end of the new array */
+	} else {
 		pos = 0;
+		nr_probes = 1; /* must point at end of array */
+	}
 	new[pos] = *tp_func;
-	new[nr_probes + 1].func = NULL;
+	new[nr_probes].func = NULL;
 	*funcs = new;
 	debug_print_probes(*funcs);
 	return old;
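Taken together, this rewrite of func_add() replaces the old two-branch memcpy splice with a single element-wise pass: stub entries left behind by a failed removal are compacted away, the priority-based insertion point is found in the same loop, and nr_probes always ends up indexing the slot for the NULL terminator. For example, with an old array [A(prio 10), stub, B(prio 5)] and a new probe C(prio 7), the pass copies A, reserves position 1 for C ahead of the lower-priority B, copies B, and drops the stub, yielding [A, C, B, NULL].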
@@ -188,8 +199,9 @@ static void *func_remove(struct tracepoint_func **funcs,
 	/* (N -> M), (N > 1, M >= 0) probes */
 	if (tp_func->func) {
 		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
-			if (old[nr_probes].func == tp_func->func &&
-			     old[nr_probes].data == tp_func->data)
+			if ((old[nr_probes].func == tp_func->func &&
+			     old[nr_probes].data == tp_func->data) ||
+			    old[nr_probes].func == tp_stub_func)
 				nr_del++;
 		}
 	}
@@ -208,14 +220,27 @@ static void *func_remove(struct tracepoint_func **funcs,
 		/* N -> M, (N > 1, M > 0) */
 		/* + 1 for NULL */
 		new = allocate_probes(nr_probes - nr_del + 1);
-		if (new == NULL)
-			return ERR_PTR(-ENOMEM);
-		for (i = 0; old[i].func; i++)
-			if (old[i].func != tp_func->func
-			    || old[i].data != tp_func->data)
-				new[j++] = old[i];
-		new[nr_probes - nr_del].func = NULL;
-		*funcs = new;
+		if (new) {
+			for (i = 0; old[i].func; i++) {
+				if ((old[i].func != tp_func->func ||
+				     old[i].data != tp_func->data) &&
+				    old[i].func != tp_stub_func)
+					new[j++] = old[i];
+			}
+			new[nr_probes - nr_del].func = NULL;
+			*funcs = new;
+		} else {
+			/*
+			 * Failed to allocate, replace the old function
+			 * with calls to tp_stub_func.
+			 */
+			for (i = 0; old[i].func; i++) {
+				if (old[i].func == tp_func->func &&
+				    old[i].data == tp_func->data)
+					WRITE_ONCE(old[i].func, tp_stub_func);
+			}
+			*funcs = old;
+		}
 	}
 	debug_print_probes(*funcs);
 	return old;
@@ -295,10 +320,12 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 	tp_funcs = rcu_dereference_protected(tp->funcs,
 			lockdep_is_held(&tracepoints_mutex));
 	old = func_remove(&tp_funcs, func);
-	if (IS_ERR(old)) {
-		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+	if (WARN_ON_ONCE(IS_ERR(old)))
 		return PTR_ERR(old);
-	}
+
+	if (tp_funcs == old)
+		/* Failed allocating new tp_funcs, replaced func with stub */
+		return 0;
 	if (!tp_funcs) {
 		/* Removed last function */
...
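The fallback branch above is the heart of making probe unregistration infallible: when the smaller replacement array cannot be allocated, the probe's slot is overwritten in place with a no-op stub, and later add/remove passes compact the stubs away (note how tracepoint_remove_func() treats "tp_funcs == old" as exactly this case). A minimal user-space sketch of the same idea follows (remove_probe and stub_fn are invented for illustration; the kernel version additionally needs WRITE_ONCE() and RCU-deferred freeing for lockless readers):

#include <stdio.h>
#include <stdlib.h>

typedef void (*probe_fn)(void);

static void probe_a(void) { puts("probe_a"); }
static void probe_b(void) { puts("probe_b"); }

/* Stands in for a removed probe when no smaller array could be allocated. */
static void stub_fn(void) { }

/* Remove 'target' from the NULL-terminated 'probes' array. On allocation
 * failure, overwrite the slot with stub_fn instead of failing the removal. */
static probe_fn *remove_probe(probe_fn *probes, probe_fn target)
{
	int i, n = 0, kept = 0;
	probe_fn *new;

	for (i = 0; probes[i]; i++)
		n++;

	new = calloc(n, sizeof(*new));	/* enough for kept entries + NULL */
	if (!new) {
		for (i = 0; probes[i]; i++)
			if (probes[i] == target)
				probes[i] = stub_fn;	/* kernel: WRITE_ONCE() */
		return probes;			/* callers keep the same array */
	}
	for (i = 0; probes[i]; i++)
		if (probes[i] != target && probes[i] != stub_fn)
			new[kept++] = probes[i];	/* compact stubs away */
	free(probes);				/* kernel: RCU-deferred free */
	return new;
}

int main(void)
{
	probe_fn *probes = calloc(3, sizeof(*probes));

	probes[0] = probe_a;
	probes[1] = probe_b;
	probes = remove_probe(probes, probe_a);
	for (int i = 0; probes[i]; i++)
		probes[i]();			/* prints "probe_b" */
	free(probes);
	return 0;
}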
@@ -31,6 +31,7 @@ help:
 	@echo '  bootconfig             - boot config tool'
 	@echo '  spi                    - spi tools'
 	@echo '  tmon                   - thermal monitoring and tuning tool'
+	@echo '  tracing                - misc tracing tools'
 	@echo '  turbostat              - Intel CPU idle stats and freq reporting tool'
 	@echo '  usb                    - USB testing tools'
 	@echo '  virtio                 - vhost test module'
@@ -64,7 +65,7 @@ acpi: FORCE
 cpupower: FORCE
 	$(call descend,power/$@)
 
-cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
+cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
 	$(call descend,$@)
 
 bpf/%: FORCE
@@ -103,7 +104,7 @@ all: acpi cgroup cpupower gpio hv firewire liblockdep \
 		perf selftests bootconfig spi turbostat usb \
 		virtio vm bpf x86_energy_perf_policy \
 		tmon freefall iio objtool kvm_stat wmi \
-		pci debugging
+		pci debugging tracing
 
 acpi_install:
 	$(call descend,power/$(@:_install=),install)
@@ -111,7 +112,7 @@ acpi_install:
 cpupower_install:
 	$(call descend,power/$(@:_install=),install)
 
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
 	$(call descend,$(@:_install=),install)
 
 liblockdep_install:
@@ -137,7 +138,8 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
 		perf_install selftests_install turbostat_install usb_install \
 		virtio_install vm_install bpf_install x86_energy_perf_policy_install \
 		tmon_install freefall_install objtool_install kvm_stat_install \
-		wmi_install pci_install debugging_install intel-speed-select_install
+		wmi_install pci_install debugging_install intel-speed-select_install \
+		tracing_install
 
 acpi_clean:
 	$(call descend,power/acpi,clean)
@@ -145,7 +147,7 @@ acpi_clean:
 cpupower_clean:
 	$(call descend,power/cpupower,clean)
 
-cgroup_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
+cgroup_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
 	$(call descend,$(@:_clean=),clean)
 
 liblockdep_clean:
@@ -184,6 +186,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
 		vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
 		freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
 		gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
-		intel-speed-select_clean
+		intel-speed-select_clean tracing_clean
 
 .PHONY: FORCE
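With these hooks in place, the new directory participates in the standard tools build flow: from the top of the kernel tree, `make -C tools tracing`, `make -C tools tracing_install` and `make -C tools tracing_clean` descend into tools/tracing like any other subdirectory listed above, and the aggregate all/install/clean targets pick it up automatically.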
@@ -32,6 +32,10 @@ grep "myevent[[:space:]]u64 var1" synthetic_events
 # it is not possible to add same name event
 ! echo "myevent u64 var2" >> synthetic_events
 
+# make sure !synthetic event doesn't require a field
+echo "!myevent" >> synthetic_events
+echo "myevent u64 var1" >> synthetic_events
+
 # Non-append open will cleanup all events and add new one
 echo "myevent u64 var2" > synthetic_events
...
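The added check exercises the `!` prefix, the deletion form of the synthetic-event syntax: `!myevent` must succeed with the event name alone and no field list, and the follow-up write confirms the name can then be reused for a fresh definition.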
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test synthetic_events syntax parser errors
-# requires: synthetic_events error_log
+# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
 
 check_error() { # command-with-error-pos-by-^
 	ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
 }
 
+check_dyn_error() { # command-with-error-pos-by-^
+	ftrace_errlog_check 'synthetic_events' "$1" 'dynamic_events'
+}
+
 check_error 'myevent ^chr arg'			# INVALID_TYPE
-check_error 'myevent ^char str[];; int v'	# INVALID_TYPE
-check_error 'myevent char ^str]; int v'		# INVALID_NAME
-check_error 'myevent char ^str;[]'		# INVALID_NAME
-check_error 'myevent ^char str[; int v'		# INVALID_TYPE
-check_error '^mye;vent char str[]'		# BAD_NAME
-check_error 'myevent char str[]; ^int'		# INVALID_FIELD
-check_error '^myevent'				# INCOMPLETE_CMD
+check_error 'myevent ^unsigned arg'		# INCOMPLETE_TYPE
+
+check_error 'myevent char ^str]; int v'		# BAD_NAME
+check_error '^mye-vent char str[]'		# BAD_NAME
+check_error 'myevent char ^st-r[]'		# BAD_NAME
+
+check_error 'myevent char str;^[]'		# INVALID_FIELD
+check_error 'myevent char str; ^int'		# INVALID_FIELD
+
+check_error 'myevent char ^str[; int v'		# INVALID_ARRAY_SPEC
+check_error 'myevent char ^str[kdjdk]'		# INVALID_ARRAY_SPEC
+check_error 'myevent char ^str[257]'		# INVALID_ARRAY_SPEC
+
+check_error '^mye;vent char str[]'		# INVALID_CMD
+check_error '^myevent ; char str[]'		# INVALID_CMD
+check_error '^myevent; char str[]'		# INVALID_CMD
+check_error '^myevent ;char str[]'		# INVALID_CMD
+check_error '^; char str[]'			# INVALID_CMD
+check_error '^;myevent char str[]'		# INVALID_CMD
+check_error '^myevent'				# INVALID_CMD
+
+check_dyn_error '^s:junk/myevent char str['	# INVALID_DYN_CMD
 
 exit 0
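The new check_dyn_error helper routes the same error-log assertion through the dynamic_events interface rather than synthetic_events, which is what the final case needs: the `s:` prefixed command form is specific to dynamic_events, and a malformed instance of it must now be reported as INVALID_DYN_CMD.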
# SPDX-License-Identifier: GPL-2.0
include ../scripts/Makefile.include

all: latency
clean: latency_clean
install: latency_install

latency:
	$(call descend,latency)

latency_install:
	$(call descend,latency,install)

latency_clean:
	$(call descend,latency,clean)

.PHONY: all install clean latency latency_install latency_clean
# SPDX-License-Identifier: GPL-2.0
latency-collector
# SPDX-License-Identifier: GPL-2.0
# Makefile for the trace latency tools
#
VAR_CFLAGS := $(shell pkg-config --cflags libtracefs 2>/dev/null)
VAR_LDLIBS := $(shell pkg-config --libs libtracefs 2>/dev/null)

TARGETS = latency-collector
CFLAGS = -Wall -Wextra -g -O2 $(VAR_CFLAGS)
LDFLAGS = -lpthread $(VAR_LDLIBS)

all: $(TARGETS)

%: %.c
	$(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)

clean:
	$(RM) latency-collector

prefix ?= /usr/local
sbindir ?= ${prefix}/sbin

install: all
	install -d $(DESTDIR)$(sbindir)
	install -m 755 -p $(TARGETS) $(DESTDIR)$(sbindir)
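A brief build note on this Makefile: the pkg-config lines make the libtracefs compile and link flags optional (errors are discarded via 2>/dev/null, so the variables simply stay empty when the library is absent), the only hard link dependency is pthread, and `make install` honors DESTDIR with defaults of prefix=/usr/local and sbindir=${prefix}/sbin, so the binary lands in /usr/local/sbin unless overridden.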