Commit 2b0b5c6f authored by Peter Zijlstra, committed by Ingo Molnar

perf, x86: Consolidate some code repetition

Remove some duplicated logic.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1e9a6d8d
@@ -452,14 +452,54 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 
 static int intel_pmu_save_and_restart(struct perf_event *event);
 
+static void __intel_pmu_pebs_event(struct perf_event *event,
+                                   struct pt_regs *iregs, void *__pebs)
+{
+        /*
+         * We cast to pebs_record_core since that is a subset of
+         * both formats and we don't use the other fields in this
+         * routine.
+         */
+        struct pebs_record_core *pebs = __pebs;
+        struct perf_sample_data data;
+        struct pt_regs regs;
+
+        if (!intel_pmu_save_and_restart(event))
+                return;
+
+        perf_sample_data_init(&data, 0);
+        data.period = event->hw.last_period;
+
+        /*
+         * We use the interrupt regs as a base because the PEBS record
+         * does not contain a full regs set, specifically it seems to
+         * lack segment descriptors, which get used by things like
+         * user_mode().
+         *
+         * In the simple case fix up only the IP and BP,SP regs, for
+         * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
+         * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+         */
+        regs = *iregs;
+        regs.ip = pebs->ip;
+        regs.bp = pebs->bp;
+        regs.sp = pebs->sp;
+
+        if (intel_pmu_pebs_fixup_ip(&regs))
+                regs.flags |= PERF_EFLAGS_EXACT;
+        else
+                regs.flags &= ~PERF_EFLAGS_EXACT;
+
+        if (perf_event_overflow(event, 1, &data, &regs))
+                x86_pmu_stop(event);
+}
+
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct debug_store *ds = cpuc->ds;
         struct perf_event *event = cpuc->events[0]; /* PMC0 only */
         struct pebs_record_core *at, *top;
-        struct perf_sample_data data;
-        struct pt_regs regs;
         int n;
 
         if (!ds || !x86_pmu.pebs)
@@ -485,9 +525,6 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
         if (n <= 0)
                 return;
 
-        if (!intel_pmu_save_and_restart(event))
-                return;
-
         /*
          * Should not happen, we program the threshold at 1 and do not
          * set a reset value.
@@ -495,31 +532,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
         WARN_ON_ONCE(n > 1);
         at += n - 1;
 
-        perf_sample_data_init(&data, 0);
-        data.period = event->hw.last_period;
-
-        /*
-         * We use the interrupt regs as a base because the PEBS record
-         * does not contain a full regs set, specifically it seems to
-         * lack segment descriptors, which get used by things like
-         * user_mode().
-         *
-         * In the simple case fix up only the IP and BP,SP regs, for
-         * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-         * A possible PERF_SAMPLE_REGS will have to transfer all regs.
-         */
-        regs = *iregs;
-        regs.ip = at->ip;
-        regs.bp = at->bp;
-        regs.sp = at->sp;
-
-        if (intel_pmu_pebs_fixup_ip(&regs))
-                regs.flags |= PERF_EFLAGS_EXACT;
-        else
-                regs.flags &= ~PERF_EFLAGS_EXACT;
-
-        if (perf_event_overflow(event, 1, &data, &regs))
-                x86_pmu_stop(event);
+        __intel_pmu_pebs_event(event, iregs, at);
 }
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
@@ -527,9 +540,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct debug_store *ds = cpuc->ds;
         struct pebs_record_nhm *at, *top;
-        struct perf_sample_data data;
         struct perf_event *event = NULL;
-        struct pt_regs regs;
         u64 status = 0;
         int bit, n;
 
@@ -571,27 +582,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                 if (!event || bit >= MAX_PEBS_EVENTS)
                         continue;
 
-                if (!intel_pmu_save_and_restart(event))
-                        continue;
-
-                perf_sample_data_init(&data, 0);
-                data.period = event->hw.last_period;
-
-                /*
-                 * See the comment in intel_pmu_drain_pebs_core()
-                 */
-                regs = *iregs;
-                regs.ip = at->ip;
-                regs.bp = at->bp;
-                regs.sp = at->sp;
-
-                if (intel_pmu_pebs_fixup_ip(&regs))
-                        regs.flags |= PERF_EFLAGS_EXACT;
-                else
-                        regs.flags &= ~PERF_EFLAGS_EXACT;
-
-                if (perf_event_overflow(event, 1, &data, &regs))
-                        x86_pmu_stop(event);
+                __intel_pmu_pebs_event(event, iregs, at);
         }
 }