Commit 6eb0a0fd authored by Venkatesh Pallipadi, committed by Linus Torvalds

[PATCH] i386: Handle missing local APIC timer interrupts on C3 state

Whenever we see that a CPU is capable of C3 (during ACPI C-state init), we
disable that CPU's local APIC timer and switch it to timer ticks broadcast
from the external timer interrupt (IRQ 0). This is needed because Intel CPUs
stop the local APIC timer in C3. The switch is currently enabled only for
Intel CPUs.

The patch below adds the i386 code and the corresponding ACPI hunk.
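For illustration only (not part of the patch): a minimal sketch, assuming a hypothetical
example_use_timer_broadcast() helper, of how the new interface is meant to be used once a
CPU has been found C3-capable. It mirrors the ACPI hunk further down.

/*
 * Illustrative sketch only (not part of the patch): a hypothetical caller
 * that has just found 'cpu' to be C3-capable and wants its timer ticks
 * delivered as broadcast IPIs from IRQ 0 instead of the (stopped) local
 * APIC timer.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/processor.h>

static void example_use_timer_broadcast(int cpu)        /* hypothetical helper */
{
#ifdef ARCH_APICTIMER_STOPS_ON_C3
        struct cpuinfo_x86 *c = cpu_data + cpu;
        cpumask_t mask = cpumask_of_cpu(cpu);

        /* Only Intel CPUs stop the local APIC timer in C3. */
        if (c->x86_vendor == X86_VENDOR_INTEL)
                /* Runs on every CPU; only the CPU named in 'mask' switches itself. */
                on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
#endif
}

switch_APIC_timer_to_ipi() masks the LVTT entry of the CPU it runs on, so it must execute
on the CPU being switched; handing the mask to on_each_cpu() and letting each CPU check
whether it is the addressed one is the same pattern the patch uses in
acpi_processor_power_verify().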
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5a07a30c
@@ -26,6 +26,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
+#include <linux/module.h>

 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -37,9 +38,16 @@
 #include <asm/i8253.h>

 #include <mach_apic.h>
+#include <mach_ipi.h>

 #include "io_ports.h"

+/*
+ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
+ * IPIs in place of local APIC timers
+ */
+static cpumask_t timer_bcast_ipi;
+
 /*
  * Knob to control our willingness to enable the local APIC.
  */
@@ -931,11 +939,16 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
 static void __setup_APIC_LVTT(unsigned int clocks)
 {
         unsigned int lvtt_value, tmp_value, ver;
+        int cpu = smp_processor_id();

         ver = GET_APIC_VERSION(apic_read(APIC_LVR));
         lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
         if (!APIC_INTEGRATED(ver))
                 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
+
+        if (cpu_isset(cpu, timer_bcast_ipi))
+                lvtt_value |= APIC_LVT_MASKED;
+
         apic_write_around(APIC_LVTT, lvtt_value);

         /*
@@ -1068,7 +1081,7 @@ void __devinit setup_secondary_APIC_clock(void)
         setup_APIC_timer(calibration_result);
 }

-void __devinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
 {
         if (using_apic_timer) {
                 unsigned long v;
@@ -1080,7 +1093,10 @@ void __devinit disable_APIC_timer(void)

 void enable_APIC_timer(void)
 {
-        if (using_apic_timer) {
+        int cpu = smp_processor_id();
+
+        if (using_apic_timer &&
+            !cpu_isset(cpu, timer_bcast_ipi)) {
                 unsigned long v;

                 v = apic_read(APIC_LVTT);
@@ -1088,6 +1104,32 @@ void enable_APIC_timer(void)
         }
 }

+void switch_APIC_timer_to_ipi(void *cpumask)
+{
+        cpumask_t mask = *(cpumask_t *)cpumask;
+        int cpu = smp_processor_id();
+
+        if (cpu_isset(cpu, mask) &&
+            !cpu_isset(cpu, timer_bcast_ipi)) {
+                disable_APIC_timer();
+                cpu_set(cpu, timer_bcast_ipi);
+        }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
+
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+        cpumask_t mask = *(cpumask_t *)cpumask;
+        int cpu = smp_processor_id();
+
+        if (cpu_isset(cpu, mask) &&
+            cpu_isset(cpu, timer_bcast_ipi)) {
+                cpu_clear(cpu, timer_bcast_ipi);
+                enable_APIC_timer();
+        }
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
+
 #undef APIC_DIVISOR

 /*
@@ -1152,6 +1194,38 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
         irq_exit();
 }

+#ifndef CONFIG_SMP
+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
+{
+        int cpu = smp_processor_id();
+
+        /*
+         * the NMI deadlock-detector uses this.
+         */
+        per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+        smp_local_timer_interrupt(regs);
+}
+#endif
+
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
+{
+        cpumask_t mask;
+
+        cpus_and(mask, cpu_online_map, timer_bcast_ipi);
+        if (!cpus_empty(mask)) {
+#ifdef CONFIG_SMP
+                send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#else
+                /*
+                 * We can directly call the apic timer interrupt handler
+                 * in UP case. Minus all irq related functions
+                 */
+                up_apic_timer_interrupt_call(regs);
+#endif
+        }
+}
+
 int setup_profiling_timer(unsigned int multiplier)
 {
         return -EINVAL;
...
@@ -302,6 +302,12 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         do_timer_interrupt(irq, regs);

         write_sequnlock(&xtime_lock);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+        if (using_apic_timer)
+                smp_send_timer_broadcast_ipi(regs);
+#endif
+
         return IRQ_HANDLED;
 }
...
@@ -843,6 +843,15 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
         unsigned int i;
         unsigned int working = 0;

+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+        struct cpuinfo_x86 *c = cpu_data + pr->id;
+        cpumask_t mask = cpumask_of_cpu(pr->id);
+
+        if (c->x86_vendor == X86_VENDOR_INTEL) {
+                on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+        }
+#endif
+
         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                 struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -857,6 +866,12 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
                 case ACPI_STATE_C3:
                         acpi_processor_power_verify_c3(pr, cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+                        if (c->x86_vendor == X86_VENDOR_INTEL) {
+                                on_each_cpu(switch_APIC_timer_to_ipi,
+                                                &mask, 1, 1);
+                        }
+#endif
                         break;
                 }
...
@@ -132,6 +132,11 @@ extern unsigned int nmi_watchdog;
 extern int disable_timer_pin_1;

+void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
 #else /* !CONFIG_X86_LOCAL_APIC */

 static inline void lapic_shutdown(void) { }
...