Commit 689802b2 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Add PERF_SAMPLE_PERIOD

In order to allow easy tracking of the period, also provide means of
adding it to the sample data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ac4bcf88
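
For context, a consumer opts into the new field by setting PERF_SAMPLE_PERIOD in the sample_type mask when opening a counter. Below is a minimal sketch written against today's perf_event_open(2) names (the perf_counter-era syscall and structures this patch touches were later renamed to their perf_event equivalents); the event type and period are arbitrary illustration values, not taken from the patch.

/*
 * Sketch (not part of the patch): request that each sample record
 * carry the sampling period.  Uses the modern perf_event_open(2)
 * interface; event type and period below are arbitrary examples.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.type          = PERF_TYPE_HARDWARE;
	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;			/* one sample per 100k cycles */
	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TIME |
			     PERF_SAMPLE_PERIOD;	/* the bit added by this patch */

	/* measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * Samples are then consumed from the mmap'ed ring buffer; with
	 * the bit above set, each PERF_RECORD_SAMPLE also carries a
	 * u64 period field.
	 */
	close(fd);
	return 0;
}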
@@ -106,6 +106,7 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CALLCHAIN		= 1U << 5,
 	PERF_SAMPLE_ID			= 1U << 6,
 	PERF_SAMPLE_CPU			= 1U << 7,
+	PERF_SAMPLE_PERIOD		= 1U << 8,
 };
 
 /*
@@ -260,6 +261,7 @@ enum perf_event_type {
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u64				time;
+	 *	u64				id;
 	 *	u64				sample_period;
 	 * };
 	 */
...
@@ -2404,6 +2404,11 @@ static void perf_counter_output(struct perf_counter *counter,
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
+	if (sample_type & PERF_SAMPLE_PERIOD) {
+		header.type |= PERF_SAMPLE_PERIOD;
+		header.size += sizeof(u64);
+	}
+
 	if (sample_type & PERF_SAMPLE_GROUP) {
 		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
@@ -2445,6 +2450,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
 
+	if (sample_type & PERF_SAMPLE_PERIOD)
+		perf_output_put(&handle, counter->hw.sample_period);
+
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
 	 */
@@ -2835,6 +2843,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 	struct {
 		struct perf_event_header	header;
 		u64				time;
+		u64				id;
 		u64				period;
 	} freq_event = {
 		.header = {
@@ -2843,6 +2852,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 			.size = sizeof(freq_event),
 		},
 		.time = sched_clock(),
+		.id = counter->id,
 		.period = period,
 	};
...
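
The two perf_counter_output() hunks show that the period is appended only when the bit is selected, so a ring-buffer reader must decode fields in the order the kernel wrote them. A minimal decode sketch follows, assuming the same sample_type as the example above (IP, TIME, PERIOD) and the emit order used by perf_counter_output() in this patch; the struct and function names are illustrative, not part of any ABI header.

/*
 * Sketch (illustrative only): payload layout of one PERF_RECORD_SAMPLE
 * when sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD.
 * Fields appear in the order perf_counter_output() emits them, with the
 * period written last among these three.
 */
#include <stdint.h>
#include <stdio.h>

struct sample_ip_time_period {
	/* a struct perf_event_header precedes this payload in the buffer */
	uint64_t ip;		/* PERF_SAMPLE_IP     */
	uint64_t time;		/* PERF_SAMPLE_TIME   */
	uint64_t period;	/* PERF_SAMPLE_PERIOD (added by this patch) */
};

void decode_sample(const void *payload)
{
	const struct sample_ip_time_period *s = payload;

	printf("ip=%#llx time=%llu period=%llu\n",
	       (unsigned long long)s->ip,
	       (unsigned long long)s->time,
	       (unsigned long long)s->period);
}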