Commit 71e2d282 authored by Peter Zijlstra, committed by Ingo Molnar

perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE)

Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would result
in a double disable, cure this by using x86_pmu_{start,stop} for
throttle/unthrottle and teach x86_pmu_stop() to check ->active_mask.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c08053e6
...@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event) ...@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)
static void x86_pmu_unthrottle(struct perf_event *event) static void x86_pmu_unthrottle(struct perf_event *event)
{ {
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int ret = x86_pmu_start(event);
struct hw_perf_event *hwc = &event->hw; WARN_ON_ONCE(ret);
if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
cpuc->events[hwc->idx] != event))
return;
x86_pmu.enable(event);
} }
void perf_event_print_debug(void) void perf_event_print_debug(void)
...@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event) ...@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx; int idx = hwc->idx;
/* if (!__test_and_clear_bit(idx, cpuc->active_mask))
* Must be done before we disable, otherwise the nmi handler return;
* could reenable again:
*/
__clear_bit(idx, cpuc->active_mask);
x86_pmu.disable(event); x86_pmu.disable(event);
/* /*
...@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) ...@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
continue; continue;
if (perf_event_overflow(event, 1, &data, regs)) if (perf_event_overflow(event, 1, &data, regs))
x86_pmu.disable(event); x86_pmu_stop(event);
} }
if (handled) if (handled)
......
...@@ -774,7 +774,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) ...@@ -774,7 +774,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
data.period = event->hw.last_period; data.period = event->hw.last_period;
if (perf_event_overflow(event, 1, &data, regs)) if (perf_event_overflow(event, 1, &data, regs))
intel_pmu_disable_event(event); x86_pmu_stop(event);
} }
intel_pmu_ack_status(ack); intel_pmu_ack_status(ack);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment