Commit 95d0ad04 authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf_counter: Fix/complete ftrace event records sampling
  perf_counter, ftrace: Fix perf_counter integration
  tracing/filters: Always free pred on filter_add_subsystem_pred() failure
  tracing/filters: Don't use pred on alloc failure
  ring-buffer: Fix memleak in ring_buffer_free()
  tracing: Fix recordmcount.pl to handle sections with only weak functions
  ring-buffer: Fix advance of reader in rb_buffer_peek()
  tracing: do not use functions starting with .L in recordmcount.pl
  ring-buffer: do not disable ring buffer on oops_in_progress
  ring-buffer: fix check of try_to_discard result
parents 413dd876 f413cdb8
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
...
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_TP_RECORD			= 1U << 10,
 
-	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };
 
 /*
@@ -413,6 +414,11 @@ struct perf_callchain_entry {
 	__u64	ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_tracepoint_record {
+	int		size;
+	char		*record;
+};
+
 struct task_struct;
 
 /**
@@ -681,6 +687,7 @@ struct perf_sample_data {
 	struct pt_regs		*regs;
 	u64			addr;
 	u64			period;
+	void			*private;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
...
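With PERF_SAMPLE_TP_RECORD in the sample format, a profiling tool can request the raw tracepoint record alongside the usual sample fields. A minimal userspace sketch of opening such a counter follows; the syscall wrapper and the debugfs-sourced event id are assumptions modeled on tools/perf, not part of this diff:

#include <linux/perf_counter.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical wrapper; tools/perf carries its own per-arch version. */
static int sys_perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
				 int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

/* Open a counter on one tracepoint, sampling every hit with its raw record. */
static int open_tracepoint_counter(unsigned long long tp_id)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	   = PERF_TYPE_TRACEPOINT;
	attr.config	   = tp_id;	/* event id read from debugfs */
	attr.sample_period = 1;
	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			     PERF_SAMPLE_TIME | PERF_SAMPLE_TP_RECORD;

	return sys_perf_counter_open(&attr, 0 /* self */, -1, -1, 0);
}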
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call(			\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	int ret = 0;
+ *
+ *	if (!atomic_inc_return(&event_call->profile_count))
+ *		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	if (atomic_add_negative(-1, &event_call->profile_count))
+ *		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call(			\
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,99 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *	__entry_size = __data_size + sizeof(*entry);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign>  <- assign our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size); <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
...
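The __perf_addr()/__perf_count() hooks only expand to something inside this profile callback; in the ordinary ftrace expansion, TP_perf_assign() swallows its arguments. A hypothetical event definition showing where they would sit — the event name and fields below are illustrative, not from this patch:

TRACE_EVENT(foo_io_done,

	TP_PROTO(u64 addr, u64 bytes),

	TP_ARGS(addr, bytes),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(u64, bytes)
	),

	TP_fast_assign(
		__entry->addr	= addr;
		__entry->bytes	= bytes;
	)
	TP_perf_assign(
		__perf_addr(addr);	/* lands in data->addr */
		__perf_count(bytes);	/* sample weight instead of 1 */
	),

	TP_printk("addr=%llu bytes=%llu",
		  (unsigned long long)__entry->addr,
		  (unsigned long long)__entry->bytes)
);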
@@ -2646,6 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		u64 counter;
 	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
+	struct perf_tracepoint_record *tp;
 	int callchain_size = 0;
 	u64 time;
 	struct {
@@ -2714,6 +2715,11 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
+	if (sample_type & PERF_SAMPLE_TP_RECORD) {
+		tp = data->private;
+		header.size += tp->size;
+	}
+
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -2777,6 +2783,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
+	if (sample_type & PERF_SAMPLE_TP_RECORD)
+		perf_output_copy(&handle, tp->record, tp->size);
+
 	perf_output_end(&handle);
 }
 
@@ -3703,17 +3712,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size)
 {
+	struct perf_tracepoint_record tp = {
+		.size = entry_size,
+		.record = record,
+	};
+
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = 0,
+		.addr = addr,
+		.private = &tp,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
...
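On the reader side the tracepoint record travels at the tail of each sample, after every field the counter asked for. A hedged parsing sketch, assuming a counter whose sample_type was only PERF_SAMPLE_TP_RECORD (with more bits set, those fields precede the record and must be skipped first):

#include <linux/perf_counter.h>
#include <stdio.h>

static void handle_sample(const struct perf_event_header *header)
{
	/* Whatever follows the header here is the raw record: a
	 * struct ftrace_raw_<call>, led by a struct trace_entry
	 * whose ->type is the tracepoint id. */
	const char *record = (const char *)(header + 1);
	int size = header->size - sizeof(*header);

	printf("tracepoint record: %d bytes at %p\n", size, (const void *)record);
}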
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	put_online_cpus();
 
+	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-	if (!rb_try_to_discard(cpu_buffer, event))
+	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		 * the box. Return the padding, and we will release
 		 * the current locks, and try again.
 		 */
-		rb_advance_reader(cpu_buffer);
 		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
 	 * buffer too. A one time deal is all you get from reading
 	 * the ring buffer from an NMI.
	 */
-	if (likely(!in_nmi() && !oops_in_progress))
+	if (likely(!in_nmi()))
 		return 1;
 
 	tracing_off_permanent();
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+		rb_advance_reader(cpu_buffer);
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		goto out_unlock;
-
-	rb_advance_reader(cpu_buffer);
+	if (event)
+		rb_advance_reader(cpu_buffer);
 
- out_unlock:
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
...
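The peek fix changes the contract: rb_buffer_peek() no longer consumes padding behind the caller's back, and ring_buffer_consume() now advances the reader exactly once per returned event. A minimal in-kernel consumer sketch, assuming a buffer created elsewhere with ring_buffer_alloc() (the processing body is illustrative):

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	/* Each call returns one event and consumes it; padding is
	 * skipped internally and never handed back to us. */
	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);
		unsigned int len = ring_buffer_event_length(event);

		pr_debug("event @%llu, %u bytes at %p\n",
			 (unsigned long long)ts, len, data);
	}
}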
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 						    int type,
...
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-				  unsigned long flags,
-				  int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
...
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		return -ENOSPC;
 	}
 
-	filter->preds[filter->n_preds] = pred;
-	filter->n_preds++;
-
 	list_for_each_entry(call, &ftrace_events, list) {
 
 		if (!call->define_fields)
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		}
 		replace_filter_string(call->filter, filter_string);
 	}
+
+	filter->preds[filter->n_preds] = pred;
+	filter->n_preds++;
 out:
 	return err;
 }
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
 
 		if (elt->op == OP_AND || elt->op == OP_OR) {
 			pred = create_logical_pred(elt->op);
+			if (!pred)
+				return -ENOMEM;
 			if (call) {
 				err = filter_add_pred(ps, call, pred);
 				filter_free_pred(pred);
-			} else
+			} else {
 				err = filter_add_subsystem_pred(ps, system,
 							pred, filter_string);
+				if (err)
+					filter_free_pred(pred);
+			}
 			if (err)
 				return err;
 
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
 		}
 
 		pred = create_pred(elt->op, operand1, operand2);
+		if (!pred)
+			return -ENOMEM;
 		if (call) {
 			err = filter_add_pred(ps, call, pred);
 			filter_free_pred(pred);
-		} else
+		} else {
 			err = filter_add_subsystem_pred(ps, system, pred,
 							filter_string);
+			if (err)
+				filter_free_pred(pred);
+		}
 		if (err)
 			return err;
...
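Both filter fixes come down to one ownership rule: filter_add_pred() copies the predicate, so its caller always frees; filter_add_subsystem_pred() keeps the pointer on success, so its caller frees only on failure; and a NULL from the allocator must bail out before either call. A compressed sketch of that convention — the helper names below are illustrative, not from this file:

#include <linux/errno.h>

struct pred;
extern int add_by_copy(struct pred *p);	/* copies: caller always frees */
extern int add_by_ref(struct pred *p);	/* owns p on success only */
extern void free_pred(struct pred *p);

static int add_one(struct pred *p, int copy_semantics)
{
	int err;

	if (!p)
		return -ENOMEM;		/* nothing allocated, nothing to free */

	if (copy_semantics) {
		err = add_by_copy(p);
		free_pred(p);		/* our copy stays ours, free always */
	} else {
		err = add_by_ref(p);
		if (err)
			free_pred(p);	/* callee never took ownership */
	}
	return err;
}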
@@ -393,7 +393,7 @@ while (<IN>) {
 	    $read_function = 0;
 	}
 	# print out any recorded offsets
-	update_funcs() if ($text_found);
+	update_funcs() if (defined($ref_func));
 
 	# reset all markers and arrays
 	$text_found = 0;
@@ -414,7 +414,10 @@ while (<IN>) {
 		$offset = hex $1;
 	    } else {
 		# if we already have a function, and this is weak, skip it
-		if (!defined($ref_func) && !defined($weak{$text})) {
+		if (!defined($ref_func) && !defined($weak{$text}) &&
+		     # PPC64 can have symbols that start with .L and
+		     # gcc considers these special. Don't use them!
+		     $text !~ /^\.L/) {
 		    $ref_func = $text;
 		    $offset = hex $1;
 		}
@@ -441,7 +444,7 @@ while (<IN>) {
 }
 
 # dump out anymore offsets that may have been found
-update_funcs() if ($text_found);
+update_funcs() if (defined($ref_func));
 
 # If we did not find any mcount callers, we are done (do nothing).
 if (!$opened) {
...
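The .L exclusion exists because gcc uses .L-prefixed names for compiler-internal local labels: they are not real functions, may vanish at link time, and so cannot anchor mcount call-site relocations. A hedged illustration of the kind of input that triggered both recordmcount fixes — the function name is hypothetical:

/* A section holding only weak functions used to leave recordmcount.pl
 * with no symbol to anchor offsets to; it now falls back to the weak
 * symbol itself, while still refusing gcc-internal labels such as
 * ".L14" that PPC64 objects expose in their symbol tables. */
void __attribute__((weak)) foo_default_handler(void)
{
	/* weak default; an arch or board file may override it */
}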
@@ -412,6 +412,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
 	if (call_graph)
 		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;
 
 	attr->mmap		= track;
+	attr->comm		= track;
 	attr->inherit		= (cpu < 0) && inherit;
...