Commit ca8f2d7f authored by Paul Mackerras, committed by Ingo Molnar

perf_counter: powerpc: add nmi_enter/nmi_exit calls

Impact: fix potential deadlocks on powerpc

Now that the core is using in_nmi() (added in e30e08f6, "perf_counter:
fix NMI race in task clock"), the powerpc perf_counter_interrupt handler
needs to call nmi_enter() and nmi_exit() in the cases where the PMU
interrupt arrives while interrupts are soft-disabled.

If interrupts were soft-enabled, we can treat it as a regular interrupt
and do irq_enter/irq_exit around the whole routine. This lets us get rid
of the test_perf_counter_pending() call at the end of
perf_counter_interrupt, thus simplifying things a little.
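
In sketch form, the entry/exit pattern this adds to perf_counter_interrupt()
is the following (condensed from the diff below; the counter-overflow
handling in the middle is elided, and nmi_enter()/nmi_exit() and
irq_enter()/irq_exit() come from <linux/hardirq.h>):

    static void perf_counter_interrupt(struct pt_regs *regs)
    {
            int nmi;

            /*
             * regs->softe is powerpc's soft-enable flag: it is 0 when the
             * PMU exception was taken with interrupts soft-disabled, in
             * which case the handler must run in NMI context so that the
             * core's in_nmi() checks see it.
             */
            nmi = !regs->softe;
            if (nmi)
                    nmi_enter();
            else
                    irq_enter();

            /* ... read the PMCs, record_and_restart(..., nmi) on overflow ... */

            if (nmi)
                    nmi_exit();
            else
                    irq_exit();     /* also runs any pending softirq work */
    }
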
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <18909.31952.873098.336615@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6c0b3244
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -714,7 +714,7 @@ hw_perf_counter_init(struct perf_counter *counter)
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_counter *counter, long val,
-                               struct pt_regs *regs)
+                               struct pt_regs *regs, int nmi)
 {
         s64 prev, delta, left;
         int record = 0;
@@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
          * Finally record data if requested.
          */
         if (record)
-                perf_counter_overflow(counter, 1, regs, 0);
+                perf_counter_overflow(counter, nmi, regs, 0);
 }
 
 /*
@@ -762,6 +762,17 @@ static void perf_counter_interrupt(struct pt_regs *regs)
         struct perf_counter *counter;
         long val;
         int found = 0;
+        int nmi;
+
+        /*
+         * If interrupts were soft-disabled when this PMU interrupt
+         * occurred, treat it as an NMI.
+         */
+        nmi = !regs->softe;
+        if (nmi)
+                nmi_enter();
+        else
+                irq_enter();
 
         for (i = 0; i < cpuhw->n_counters; ++i) {
                 counter = cpuhw->counter[i];
@@ -769,7 +780,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
                 if ((int)val < 0) {
                         /* counter has overflowed */
                         found = 1;
-                        record_and_restart(counter, val, regs);
+                        record_and_restart(counter, val, regs, nmi);
                 }
         }
 
@@ -796,18 +807,10 @@ static void perf_counter_interrupt(struct pt_regs *regs)
          */
         mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 
-        /*
-         * If we need a wakeup, check whether interrupts were soft-enabled
-         * when we took the interrupt.  If they were, we can wake stuff up
-         * immediately; otherwise we'll have do the wakeup when interrupts
-         * get soft-enabled.
-         */
-        if (test_perf_counter_pending() && regs->softe) {
-                irq_enter();
-                clear_perf_counter_pending();
-                perf_counter_do_pending();
-                irq_exit();
-        }
+        if (nmi)
+                nmi_exit();
+        else
+                irq_exit();
 }
 
 void hw_perf_counter_setup(int cpu)