Commit 48e22d56 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: x86: Remove interrupt throttle

Remove the x86-specific interrupt throttle.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.616671838@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ff99be57
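
For context, the throttle being removed worked as follows: every PMU interrupt incremented a per-CPU count, and once that count reached PERFMON_MAX_INTERRUPTS (100000/HZ, capping PMU interrupts at 100 KHz per CPU) all counters on that CPU were disabled. The local APIC timer tick then called perf_counter_unthrottle() to clear the count and re-enable the counters. Below is a condensed sketch, paraphrased from the deleted lines in the diff that follows; throttle_check() is a hypothetical helper name and this is not a standalone buildable file:

/*
 * Sketch of the removed throttle, condensed from the deleted code below.
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)	/* at most 100 KHz per CPU */

/* Called from the PMU interrupt handler (hypothetical helper name):
 * after PERFMON_MAX_INTERRUPTS interrupts in one tick, stop counting. */
static void throttle_check(struct cpu_hw_counters *cpuc)
{
	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
		__perf_disable();
		cpuc->enabled = 0;
		barrier();
	}
}

/* Called from local_apic_timer_interrupt() each tick: reset the count
 * and re-enable the PMU if this CPU had been throttled. */
void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
		perf_enable();
	cpuc->interrupts = 0;
}
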
@@ -763,8 +763,6 @@ static void local_apic_timer_interrupt(void)
 	inc_irq_stat(apic_timer_irqs);
 
 	evt->event_handler(evt);
-
-	perf_counter_unthrottle();
 }
 
 /*
...
@@ -718,11 +718,6 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
 		intel_pmu_enable_counter(hwc, idx);
 }
 
-/*
- * Maximum interrupt frequency of 100KHz per CPU
- */
-#define PERFMON_MAX_INTERRUPTS (100000/HZ)
-
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
@@ -775,7 +770,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	if (status)
 		goto again;
 
-	if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
-		perf_enable();
+	perf_enable();
 
 	return 1;
@@ -783,7 +777,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-	int cpu, idx, throttle = 0, handled = 0;
+	int cpu, idx, handled = 0;
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
@@ -792,16 +786,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
-		throttle = 1;
-		__perf_disable();
-		cpuc->enabled = 0;
-		barrier();
-	}
-
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		int disable = 0;
-
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
@@ -809,45 +794,23 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		hwc = &counter->hw;
 
 		if (counter->hw_event.nmi != nmi)
-			goto next;
+			continue;
 
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-			goto next;
+			continue;
 
 		/* counter overflow */
 		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
-		disable = perf_counter_overflow(counter, nmi, regs, 0);
-
-next:
-		if (disable || throttle)
+		if (perf_counter_overflow(counter, nmi, regs, 0))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
 	return handled;
 }
 
-void perf_counter_unthrottle(void)
-{
-	struct cpu_hw_counters *cpuc;
-
-	if (!x86_pmu_initialized())
-		return;
-
-	cpuc = &__get_cpu_var(cpu_hw_counters);
-	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
-		/*
-		 * Clear them before re-enabling irqs/NMIs again:
-		 */
-		cpuc->interrupts = 0;
-		perf_enable();
-	} else {
-		cpuc->interrupts = 0;
-	}
-}
-
 void smp_perf_counter_interrupt(struct pt_regs *regs)
 {
 	irq_enter();
...
@@ -570,7 +570,6 @@ extern int perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
-extern void perf_counter_unthrottle(void);
 extern void __perf_disable(void);
 extern bool __perf_enable(void);
 extern void perf_disable(void);
@@ -635,7 +634,6 @@ static inline int perf_counter_init_task(struct task_struct *child) { }
 static inline void perf_counter_exit_task(struct task_struct *child) { }
 static inline void perf_counter_do_pending(void) { }
 static inline void perf_counter_print_debug(void) { }
-static inline void perf_counter_unthrottle(void) { }
 static inline void perf_disable(void) { }
 static inline void perf_enable(void) { }
 static inline int perf_counter_task_disable(void) { return -EINVAL; }
...
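
With the throttle gone, the only remaining brake on the x86 side is per counter: a counter is stopped only when the generic overflow code asks for it. A condensed sketch of the new loop body in amd_pmu_handle_irq(), restating the added lines above (not a standalone buildable file):

/* Per-counter policy after this commit: the generic layer decides. */
val = x86_perf_counter_update(counter, hwc, idx);
if (val & (1ULL << (x86_pmu.counter_bits - 1)))
	continue;				/* this counter did not overflow */

x86_perf_counter_set_period(counter, hwc, idx);	/* re-arm the counter */
handled = 1;
inc_irq_stat(apic_perf_irqs);
if (perf_counter_overflow(counter, nmi, regs, 0))	/* nonzero requests a disable */
	amd_pmu_disable_counter(hwc, idx);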