Commit eb02ce01 authored by Tom Zanussi, committed by Ingo Molnar

tracing/filters: use ring_buffer_discard_commit() in filter_check_discard()

This patch changes filter_check_discard() to make use of the new
ring_buffer_discard_commit() function and modifies the current users to
call the old commit function in the non-discard case.

It also introduces a version of filter_check_discard() that uses the
global trace buffer (filter_current_check_discard()) for call sites that
log to that buffer.
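
As a rough sketch of the resulting call-site convention (using the same
locals, call, entry, tr and event, that appear at each site in the diff
below; this is not a literal hunk from the patch): when the filter rejects
a record, filter_check_discard() now discards and commits the event itself
via ring_buffer_discard_commit(), so the caller only commits when the
record was kept.

	entry = ring_buffer_event_data(event);
	/* ... fill in the entry fields ... */

	/* If the filter discarded the event, it has already been committed
	 * via ring_buffer_discard_commit(); only commit it here when it
	 * was kept. */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);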

v2 changes:

- fix compile error noticed by Ingo Molnar
Signed-off-by: Tom Zanussi <tzanussi@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: fweisbec@gmail.com
LKML-Reference: <1239178554.10295.36.camel@tropicana>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 5f77a88b
@@ -63,9 +63,8 @@ static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
 	entry->gfp_flags = gfp_flags;
 	entry->node = node;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
@@ -90,9 +89,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
 	entry->call_site = call_site;
 	entry->ptr = ptr;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
...
@@ -171,6 +171,12 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+				 struct ring_buffer_event *event)
+{
+	return filter_check_discard(call, rec, global_trace.buffer, event);
+}
+
 cycle_t ftrace_now(int cpu)
 {
 	u64 ts;
@@ -919,9 +925,8 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -943,8 +948,8 @@ static int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 
 	return 1;
 }
@@ -967,8 +972,8 @@ static void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
@@ -1004,8 +1009,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
@@ -1052,8 +1057,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
@@ -1114,9 +1119,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 
-	filter_check_discard(call, entry, event);
-
-	trace_buffer_unlock_commit(tr, event, flags, pc);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1142,9 +1146,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1285,8 +1288,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1341,8 +1344,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
...
@@ -866,13 +866,21 @@ extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
 extern void filter_free_subsystem_preds(struct event_subsystem *system);
 extern int filter_add_subsystem_pred(struct event_subsystem *system,
 				     struct filter_pred *pred);
+extern int filter_current_check_discard(struct ftrace_event_call *call,
+					void *rec,
+					struct ring_buffer_event *event);
 
-static inline void
+static inline int
 filter_check_discard(struct ftrace_event_call *call, void *rec,
+		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
 {
-	if (unlikely(call->preds) && !filter_match_preds(call, rec))
-		ring_buffer_event_discard(event);
+	if (unlikely(call->preds) && !filter_match_preds(call, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
 
 #define __common_field(type, item) \
...
@@ -74,9 +74,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out:
 	atomic_dec(&tr->data[cpu]->disabled);
...
@@ -222,11 +222,8 @@ static void ftrace_raw_event_##call(proto) \
 									\
 	assign;								\
 									\
-	if (call->preds && !filter_match_preds(call, entry))		\
-		trace_current_buffer_discard_commit(event);		\
-	else								\
-		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
-									\
+	if (!filter_current_check_discard(call, entry, event))		\
+		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
 }									\
 									\
 static int ftrace_raw_reg_event_##call(void)				\
...
@@ -195,8 +195,8 @@ void trace_hw_branch(u64 from, u64 to)
 	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->from = from;
 	entry->to   = to;
-	filter_check_discard(call, entry, event);
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
 
 out:
 	atomic_dec(&tr->data[cpu]->disabled);
...
@@ -55,8 +55,8 @@ static void probe_power_end(struct power_trace *it)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	filter_check_discard(call, entry, event);
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -87,8 +87,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	filter_check_discard(call, entry, event);
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
 	preempt_enable();
 }
...