Commit f306cc82 authored by Tom Zanussi, committed by Steven Rostedt

tracing: Update event filters for multibuffer

The trace event filters are still tied to event calls rather than
event files, which means you don't get what you'd expect when using
filters in the multibuffer case:

Before:

  # echo 'bytes_alloc > 8192' > /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 8192
  # mkdir /sys/kernel/debug/tracing/instances/test1
  # echo 'bytes_alloc > 2048' > /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 2048
  # cat /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  bytes_alloc > 2048

Setting the filter in tracing/instances/test1/events shouldn't affect
the same event in tracing/events as it does above.

After:

  # echo 'bytes_alloc > 8192' > /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 8192
  # mkdir /sys/kernel/debug/tracing/instances/test1
  # echo 'bytes_alloc > 2048' > /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 8192
  # cat /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  bytes_alloc > 2048

We'd like to just move the filter directly from ftrace_event_call to
ftrace_event_file, but there are a couple cases that don't yet have
multibuffer support and therefore have to continue using the current
event_call-based filters.  For those cases, a new USE_CALL_FILTER bit
is added to the event_call flags, whose main purpose is to keep the
old behavior for those cases until they can be updated with
multibuffer support; at that point, the USE_CALL_FILTER flag (and the
new associated call_filter_check_discard() function) can go away.
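
For a case that can't be converted yet, opting in is just a matter of
setting the flag on the event call; a minimal sketch of the pattern this
patch applies to the syscall, ftrace-internal, and uprobe events:

  /* keep the old call-based filter until this event gains multibuffer support */
  call->flags |= TRACE_EVENT_FL_USE_CALL_FILTER;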

The multibuffer support also made filter_current_check_discard()
redundant, so this change removes that function as well and replaces
it with filter_check_discard() (or call_filter_check_discard() as
appropriate).
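
The conversion at the buffer-commit sites is mechanical; as a sketch
(written here only to show the shape of the change, note that the
argument order changes along with the function name):

  /* before: check against the filter on the event call */
  if (!filter_current_check_discard(buffer, call, entry, event))
          trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

  /* after: check against the per-file filter via the ftrace_event_file */
  if (!filter_check_discard(ftrace_file, entry, buffer, event))
          trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

  /* after, for call-based sites that don't yet have multibuffer support */
  if (!call_filter_check_discard(call, entry, buffer, event))
          trace_buffer_unlock_commit(buffer, event, irq_flags, pc);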

Link: http://lkml.kernel.org/r/f16e9ce4270c62f46b2e966119225e1c3cca7e60.1382620672.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent f02b625d
@@ -202,6 +202,7 @@ enum {
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 	TRACE_EVENT_FL_WAS_ENABLED_BIT,
+	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 };

@@ -213,6 +214,7 @@ enum {
  * WAS_ENABLED - Set and stays set when an event was ever enabled
  *               (used for module unloading, if a module event is enabled,
  *               it is best to clear the buffers that used it).
+ * USE_CALL_FILTER - For ftrace internal events, don't use file filter
  */
 enum {
 	TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),

@@ -220,6 +222,7 @@ enum {
 	TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 	TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+	TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 };

 struct ftrace_event_call {

@@ -238,6 +241,7 @@ struct ftrace_event_call {
  *   bit 2:	failed to apply filter
  *   bit 3:	ftrace internal event (do not enable)
  *   bit 4:	Event was enabled by module
+ *   bit 5:	use call filter rather than file filter
  */
 	int			flags; /* static flags of different events */

@@ -253,6 +257,8 @@ struct ftrace_subsystem_dir;
 enum {
 	FTRACE_EVENT_FL_ENABLED_BIT,
 	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+	FTRACE_EVENT_FL_FILTERED_BIT,
+	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	FTRACE_EVENT_FL_SOFT_MODE_BIT,
 	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 };

@@ -261,6 +267,8 @@ enum {
  * Ftrace event file flags:
  *  ENABLED       - The event is enabled
  *  RECORDED_CMD  - The comms should be recorded at sched_switch
+ *  FILTERED      - The event has a filter attached
+ *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
  *  SOFT_DISABLED - When set, do not trace the event (even though its
  *                  tracepoint may be enabled)

@@ -268,6 +276,8 @@ enum {
 enum {
 	FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
 	FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+	FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+	FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
 	FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
 };

@@ -275,6 +285,7 @@ enum {
 struct ftrace_event_file {
 	struct list_head		list;
 	struct ftrace_event_call	*event_call;
+	struct event_filter		*filter;
 	struct dentry			*dir;
 	struct trace_array		*tr;
 	struct ftrace_subsystem_dir	*system;

@@ -310,12 +321,16 @@ struct ftrace_event_file {
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

-extern void destroy_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_file *file);
+extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_current_check_discard(struct ring_buffer *buffer,
-					struct ftrace_event_call *call,
-					void *rec,
-					struct ring_buffer_event *event);
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+				struct ring_buffer *buffer,
+				struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+				     struct ring_buffer *buffer,
+				     struct ring_buffer_event *event);

 enum {
 	FILTER_OTHER = 0,
......
@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	.class			= &event_class_syscall_enter,	\
 	.event.funcs		= &enter_syscall_print_funcs,	\
 	.data			= (void *)&__syscall_meta_##sname,\
-	.flags			= TRACE_EVENT_FL_CAP_ANY,	\
+	.flags			= TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
 };								\
 static struct ftrace_event_call __used				\
   __attribute__((section("_ftrace_events")))			\

@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	.class			= &event_class_syscall_exit,	\
 	.event.funcs		= &exit_syscall_print_funcs,	\
 	.data			= (void *)&__syscall_meta_##sname,\
-	.flags			= TRACE_EVENT_FL_CAP_ANY,	\
+	.flags			= TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
 };								\
 static struct ftrace_event_call __used				\
   __attribute__((section("_ftrace_events")))			\
......
@@ -437,9 +437,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			   __array macros.
  *
- *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_nowake_buffer_unlock_commit(buffer,
- *						   event, irq_flags, pc);
+ *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {

@@ -553,7 +552,7 @@ ftrace_raw_event_##call(void *__data, proto) \
 	\
 	{ assign; } \
 	\
-	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+	if (!filter_check_discard(ftrace_file, entry, buffer, event)) \
 		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }

 /*
......
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
 	mutex_unlock(&trace_types_lock);
 }

-int filter_current_check_discard(struct ring_buffer *buffer,
-				 struct ftrace_event_call *call, void *rec,
-				 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+			 struct ring_buffer *buffer,
+			 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, buffer, event);
+	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(file->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+			      struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
+{
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(call->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);

 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {

@@ -1633,7 +1653,7 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }

@@ -1717,7 +1737,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);

 out:

@@ -1819,7 +1839,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);

 out_drop_count:

@@ -2011,7 +2031,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}

@@ -2066,7 +2086,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
......
@@ -1007,9 +1007,9 @@ struct filter_pred {

 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_call *call,
+extern void print_event_filter(struct ftrace_event_file *file,
 			       struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_call *call,
+extern int apply_event_filter(struct ftrace_event_file *file,
 			      char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 					char *filter_string);

@@ -1020,20 +1020,6 @@ extern int filter_assign_type(const char *type);
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);

-static inline int
-filter_check_discard(struct ftrace_event_call *call, void *rec,
-		     struct ring_buffer *buffer,
-		     struct ring_buffer_event *event)
-{
-	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-	    !filter_match_preds(call->filter, rec)) {
-		ring_buffer_discard_commit(buffer, event);
-		return 1;
-	}
-
-	return 0;
-}

 extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
......
@@ -78,7 +78,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;

-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);

 out:
......
@@ -989,7 +989,7 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_file *file;
 	struct trace_seq *s;
 	int r = -ENODEV;

@@ -1004,12 +1004,12 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 	trace_seq_init(s);

 	mutex_lock(&event_mutex);
-	call = event_file_data(filp);
-	if (call)
-		print_event_filter(call, s);
+	file = event_file_data(filp);
+	if (file)
+		print_event_filter(file, s);
 	mutex_unlock(&event_mutex);

-	if (call)
+	if (file)
 		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

 	kfree(s);

@@ -1021,7 +1021,7 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_file *file;
 	char *buf;
 	int err = -ENODEV;

@@ -1039,9 +1039,9 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	buf[cnt] = '\0';

 	mutex_lock(&event_mutex);
-	call = event_file_data(filp);
-	if (call)
-		err = apply_event_filter(call, buf);
+	file = event_file_data(filp);
+	if (file)
+		err = apply_event_filter(file, buf);
 	mutex_unlock(&event_mutex);

 	free_page((unsigned long) buf);

@@ -1539,7 +1539,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 			return -1;
 		}
 	}

-	trace_create_file("filter", 0644, file->dir, call,
+	trace_create_file("filter", 0644, file->dir, file,
 			  &ftrace_event_filter_fops);

 	trace_create_file("format", 0444, file->dir, call,

@@ -1577,6 +1577,7 @@ static void event_remove(struct ftrace_event_call *call)
 		if (file->event_call != call)
 			continue;
 		ftrace_event_enable_disable(file, 0);
+		destroy_preds(file);
 		/*
 		 * The do_for_each_event_file() is
 		 * a double loop. After finding the call for this

@@ -1700,7 +1701,7 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
 	event_remove(call);
 	trace_destroy_fields(call);
-	destroy_preds(call);
+	destroy_call_preds(call);
 }

 static int probe_remove_event_call(struct ftrace_event_call *call)
......
(The diff for one further file is collapsed and not shown here.)
@@ -180,7 +180,7 @@ struct ftrace_event_call __used event_##call = { \
 	.event.type		= etype,			\
 	.class			= &event_class_ftrace_##call,	\
 	.print_fmt		= print,			\
-	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE,	\
+	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 };								\
 struct ftrace_event_call __used					\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
......
@@ -270,7 +270,7 @@ int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent		= *trace;
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);

 	return 1;

@@ -385,7 +385,7 @@ void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry	= ring_buffer_event_data(event);
 	entry->ret			= *trace;
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
......
@@ -835,7 +835,7 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 	entry->ip = (unsigned long)tp->rp.kp.addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }

@@ -884,7 +884,7 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
......
@@ -323,7 +323,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->rw			= *rw;

-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }

@@ -353,7 +353,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->map			= *map;

-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
......
@@ -45,7 +45,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state		= next->state;
 	entry->next_cpu	= task_cpu(next);

-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }

@@ -101,7 +101,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state		= wakee->state;
 	entry->next_cpu			= task_cpu(wakee);

-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
......
@@ -336,8 +336,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

-	if (!filter_current_check_discard(buffer, sys_data->enter_event,
-					  entry, event))
+	if (!call_filter_check_discard(sys_data->enter_event, entry,
+				       buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }

@@ -377,8 +377,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);

-	if (!filter_current_check_discard(buffer, sys_data->exit_event,
-					  entry, event))
+	if (!call_filter_check_discard(sys_data->exit_event, entry,
+				       buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
......
@@ -128,6 +128,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
 	init_trace_uprobe_filter(&tu->filter);
+	tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
 	return tu;

 error:

@@ -561,7 +562,7 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
 	for (i = 0; i < tu->nr_args; i++)
 		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, 0);
 }
......