Commit 26b119bc authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Log irq_period changes

For the dynamic irq_period code, log whenever we change the period so that
analyzing code can normalize the event flow.

[ Impact: add new feature to allow more precise profiling ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.298769743@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d7b629a3
include/linux/perf_counter.h
@@ -257,6 +257,14 @@ enum perf_event_type {
 	 */
 	PERF_EVENT_COMM			= 3,
 
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				irq_period;
+	 * };
+	 */
+	PERF_EVENT_PERIOD		= 4,
+
 	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
 	 * will be PERF_RECORD_*
kernel/perf_counter.c
@@ -1046,7 +1046,9 @@ int perf_counter_task_enable(void)
 	return 0;
 }
 
-void perf_adjust_freq(struct perf_counter_context *ctx)
+static void perf_log_period(struct perf_counter *counter, u64 period);
+
+static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
 	u64 irq_period;
@@ -1072,6 +1074,8 @@ void perf_adjust_freq(struct perf_counter_context *ctx)
 		if (!irq_period)
 			irq_period = 1;
 
+		perf_log_period(counter, irq_period);
+
 		counter->hw.irq_period = irq_period;
 		counter->hw.interrupts = 0;
 	}
@@ -2406,6 +2410,40 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 	perf_counter_mmap_event(&mmap_event);
 }
 
+/*
+ *
+ */
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
+{
+	struct perf_output_handle handle;
+	int ret;
+
+	struct {
+		struct perf_event_header	header;
+		u64				time;
+		u64				period;
+	} freq_event = {
+		.header = {
+			.type = PERF_EVENT_PERIOD,
+			.misc = 0,
+			.size = sizeof(freq_event),
+		},
+		.time = sched_clock(),
+		.period = period,
+	};
+
+	if (counter->hw.irq_period == period)
+		return;
+
+	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, freq_event);
+	perf_output_end(&handle);
+}
+
 /*
  * Generic counter overflow handling.
  */
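For context, a minimal, hypothetical userspace sketch (not part of this patch) of the normalization the commit message refers to. It assumes a consumer of the counter mmap buffer that sees PERF_EVENT_PERIOD records in the layout perf_log_period() writes above (header, time, period); the names period_event, counter_state, handle_period() and handle_sample() are illustrative only, not an existing tool API.

/*
 * Illustrative consumer-side sketch: weight each overflow sample by the
 * period that was in effect when it was taken, updating that period from
 * PERF_EVENT_PERIOD records as they appear in the stream.
 */
#include <stdint.h>
#include <stdio.h>

struct perf_event_header {		/* matches the ABI header fields */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

struct period_event {			/* mirrors freq_event written by perf_log_period() */
	struct perf_event_header header;
	uint64_t time;			/* sched_clock() timestamp */
	uint64_t period;		/* new irq_period */
};

struct counter_state {			/* illustrative consumer bookkeeping */
	uint64_t cur_period;		/* period in effect for incoming samples */
	uint64_t total_events;		/* events represented by samples seen so far */
};

/* A PERF_EVENT_PERIOD record: subsequent samples use the new period. */
static void handle_period(struct counter_state *st, const struct period_event *ev)
{
	st->cur_period = ev->period;
}

/* Each overflow sample stands for cur_period events of the underlying counter. */
static void handle_sample(struct counter_state *st)
{
	st->total_events += st->cur_period;
}

int main(void)
{
	struct counter_state st = { .cur_period = 1, .total_events = 0 };
	struct period_event ev = {
		.header = { .type = 4 /* PERF_EVENT_PERIOD */, .misc = 0, .size = sizeof(ev) },
		.time   = 0,
		.period = 10000,
	};

	handle_period(&st, &ev);	/* period switched to 10000 */
	handle_sample(&st);		/* each sample now counts as 10000 events */
	handle_sample(&st);

	printf("estimated events: %llu\n", (unsigned long long)st.total_events);
	return 0;
}

The point is simply that once period changes are logged, a consumer can weight each sample by the period in effect when it was taken, instead of assuming one fixed period for the whole run.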