Commit 53b441a5 authored by Ingo Molnar

Revert "perf_counter, x86: speed up the scheduling fast-path"

This reverts commit b68f1d2e.

It is causing problems (stuck/stuttering profiling) when mixed
NMI and non-NMI counters are used.
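For context: the reverted patch moved the perf_counters_lapic_init() call out
of x86_pmu_enable(), which runs every time a counter is scheduled onto the PMU,
and into one-time counter init. The local APIC's LVTPC entry is a single
per-CPU register that delivers counter overflows either as an NMI or as a
normal vectored interrupt, so once the call stopped running at each enable, a
mix of NMI and non-NMI counters left LVTPC programmed for whichever counter
touched it last. Below is a sketch of the helper, reconstructed from memory of
the 2.6.30-era arch/x86/kernel/cpu/perf_counter.c; details may differ from the
actual source.

/*
 * Reconstructed sketch, not verbatim kernel code: program the local
 * APIC's performance-counter LVT entry for either NMI or normal
 * interrupt delivery.  LVTPC is one register per CPU, so NMI and
 * non-NMI counters cannot both have their preferred delivery mode
 * at the same time.
 */
void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!x86_pmu_initialized())
		return;

	/* Mask the error LVT entry while LVTPC is reprogrammed. */
	apic_val = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);

	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);

	apic_write(APIC_LVTERR, apic_val);
}

Reverting trades the small cost of reprogramming LVTPC on every
x86_pmu_enable() for correctness: the delivery mode always matches the counter
being scheduled in.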

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.703093461@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a78ac325
arch/x86/kernel/cpu/perf_counter.c

@@ -293,7 +293,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 			return -EACCES;
 		hwc->nmi = 1;
 	}
-	perf_counters_lapic_init(hwc->nmi);
 
 	if (!hwc->irq_period)
 		hwc->irq_period = x86_pmu.max_period;
@@ -612,6 +611,8 @@ static int x86_pmu_enable(struct perf_counter *counter)
 		hwc->counter_base = x86_pmu.perfctr;
 	}
 
+	perf_counters_lapic_init(hwc->nmi);
+
 	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
@@ -1037,7 +1038,7 @@ void __init init_hw_perf_counters(void)
 	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
 
-	perf_counters_lapic_init(1);
+	perf_counters_lapic_init(0);
 	register_die_notifier(&perf_counter_nmi_notifier);
 }