Commit 24cece3a authored by Dave Jones's avatar Dave Jones

[PATCH] x86 bluesmoke update.

o  Make MCE compile time optional	(Paul Gortmaker)
o  P4 thermal trip monitoring.		(Zwane Mwaikambo)
o  Non-fatal MCE logging.		(Me)
parent e2b0649d
...@@ -803,6 +803,30 @@ CONFIG_APM_REAL_MODE_POWER_OFF ...@@ -803,6 +803,30 @@ CONFIG_APM_REAL_MODE_POWER_OFF
a work-around for a number of buggy BIOSes. Switch this option on if a work-around for a number of buggy BIOSes. Switch this option on if
your computer crashes instead of powering off properly. your computer crashes instead of powering off properly.
CONFIG_X86_MCE
Machine Check Exception support allows the processor to notify the
kernel if it detects a problem (e.g. overheating, component failure).
The action the kernel takes depends on the severity of the problem,
ranging from a warning message on the console, to halting the machine.
Your processor must be a Pentium or newer to support this - check the
flags in /proc/cpuinfo for mce. Note that some older Pentium systems
have a design flaw which leads to false MCE events - hence MCE is
disabled on all P5 processors, unless explicitly enabled with "mce"
as a boot argument. Similarly, if MCE is built in and creates a
problem on some new non-standard machine, you can boot with "nomce"
to disable it. MCE support simply ignores non-MCE processors like
the 386 and 486, so nearly everyone can say Y here.
CONFIG_X86_MCE_NONFATAL
Enabling this feature starts a timer that fires every 5 seconds and
inspects the machine check registers to see if anything has happened.
Non-fatal problems are corrected automatically (but still logged).
Disable this if you don't want to see these messages.
The messages this option prints may indicate dying hardware,
or out-of-spec (i.e., overclocked) hardware.
This option only does something on hardware with Intel P6 style MCE.
(Pentium Pro and above, AMD Athlon/Duron)
CONFIG_TOSHIBA CONFIG_TOSHIBA
This adds a driver to safely access the System Management Mode of This adds a driver to safely access the System Management Mode of
the CPU on Toshiba portables with a genuine Toshiba BIOS. It does the CPU on Toshiba portables with a genuine Toshiba BIOS. It does
...@@ -909,3 +933,6 @@ CONFIG_DEBUG_BUGVERBOSE ...@@ -909,3 +933,6 @@ CONFIG_DEBUG_BUGVERBOSE
of the BUG call as well as the EIP and oops trace. This aids of the BUG call as well as the EIP and oops trace. This aids
debugging but costs about 70-100K of memory. debugging but costs about 70-100K of memory.
CONFIG_DEBUG_OBSOLETE
Say Y here if you want to reduce the chances of the tree compiling,
and are prepared to dig into driver internals to fix compile errors.
...@@ -156,6 +156,10 @@ if [ "$CONFIG_MWINCHIP3D" = "y" ]; then ...@@ -156,6 +156,10 @@ if [ "$CONFIG_MWINCHIP3D" = "y" ]; then
define_bool CONFIG_X86_USE_PPRO_CHECKSUM y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
define_bool CONFIG_X86_OOSTORE y define_bool CONFIG_X86_OOSTORE y
fi fi
bool 'Machine Check Exception' CONFIG_X86_MCE
dep_bool 'Check for non-fatal errors' CONFIG_X86_MCE_NONFATAL $CONFIG_X86_MCE
tristate 'Toshiba Laptop support' CONFIG_TOSHIBA tristate 'Toshiba Laptop support' CONFIG_TOSHIBA
tristate 'Dell laptop support' CONFIG_I8K tristate 'Dell laptop support' CONFIG_I8K
......
...@@ -449,6 +449,7 @@ static struct { ...@@ -449,6 +449,7 @@ static struct {
unsigned int apic_lvterr; unsigned int apic_lvterr;
unsigned int apic_tmict; unsigned int apic_tmict;
unsigned int apic_tdcr; unsigned int apic_tdcr;
unsigned int apic_thmr;
} apic_pm_state; } apic_pm_state;
static void apic_pm_suspend(void *data) static void apic_pm_suspend(void *data)
...@@ -470,6 +471,7 @@ static void apic_pm_suspend(void *data) ...@@ -470,6 +471,7 @@ static void apic_pm_suspend(void *data)
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
apic_pm_state.apic_tmict = apic_read(APIC_TMICT); apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
__save_flags(flags); __save_flags(flags);
__cli(); __cli();
disable_local_APIC(); disable_local_APIC();
...@@ -498,6 +500,7 @@ static void apic_pm_resume(void *data) ...@@ -498,6 +500,7 @@ static void apic_pm_resume(void *data)
apic_write(APIC_SPIV, apic_pm_state.apic_spiv); apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
......
...@@ -3,16 +3,128 @@ ...@@ -3,16 +3,128 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/smp.h>
#include <linux/config.h>
#include <linux/irq.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/system.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/apic.h>
#include <asm/pgtable.h>
#ifdef CONFIG_X86_MCE
static int mce_disabled __initdata = 0; static int mce_disabled __initdata = 0;
static int banks;
/* /*
* Machine Check Handler For PII/PIII * If we get an MCE, we don't know what state the caches/TLB's are
* going to be in, so we throw them all away.
*/ */
/*
 * If we get an MCE we don't know what state the caches/TLBs are in,
 * so throw them all away.  Use WBINVD rather than INVD: INVD discards
 * modified cache lines without writing them back to memory, so it
 * would silently lose any dirty data still sitting in the cache.
 */
static inline void flush_all (void)
{
	__asm__ __volatile__ ("wbinvd": : : "memory");
	__flush_tlb();
}
static int banks; /*
* P4/Xeon Thermal transition interrupt handler
*/
/*
 * P4/Xeon thermal transition interrupt handler.  Reads the thermal
 * status MSR and reports whether the CPU has crossed the trip point
 * (and is therefore clock-modulating) or has returned to normal.
 */
static void intel_thermal_interrupt(struct pt_regs *regs)
{
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int this_cpu = smp_processor_id();
	u32 lo, hi;

	ack_APIC_irq();

	rdmsr(MSR_IA32_THERM_STATUS, lo, hi);
	if (!(lo & 1)) {
		/* Trip point cleared: back below threshold. */
		printk(KERN_INFO "CPU#%d: Temperature/speed normal\n", this_cpu);
		return;
	}

	/* Bit 0 set: thermal trip, clock modulation engaged. */
	printk(KERN_EMERG "CPU#%d: Temperature above threshold\n", this_cpu);
	printk(KERN_EMERG "CPU#%d: Running in modulated clock mode\n", this_cpu);
#endif
}
/*
 * Stub handler: fires if the thermal LVT vector is taken before a
 * vendor-specific handler has been installed.
 */
static void unexpected_thermal_interrupt(struct pt_regs *regs)
{
	unsigned int this_cpu = smp_processor_id();

	printk(KERN_ERR "CPU#%d: Unexpected LVT TMR interrupt!\n", this_cpu);
}
/*
* Thermal interrupt handler for this CPU setup
*/
/*
 * Per-vendor thermal handler for this CPU; points at the "unexpected"
 * stub until init code installs the real one.
 */
static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;

/* Entry point from the APIC thermal interrupt vector. */
asmlinkage void smp_thermal_interrupt(struct pt_regs regs)
{
	(*vendor_thermal_interrupt)(&regs);
}
/* P4/Xeon Thermal regulation detect and init */
static void __init intel_init_thermal(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_LOCAL_APIC
u32 l, h;
unsigned int cpu = smp_processor_id();
/* Thermal monitoring */
if (!test_bit(X86_FEATURE_ACPI, &c->x86_capability))
return; /* -ENODEV */
/* Clock modulation */
if (!test_bit(X86_FEATURE_ACC, &c->x86_capability))
return; /* -ENODEV */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
/* first check if its enabled already, in which case there might
* be some SMM goo which handles it, so we can't even put a handler
* since it might be delivered via SMI already -zwanem.
*/
if (l & (1<<3)) {
printk(KERN_DEBUG "CPU#%d: Thermal monitoring already enabled\n", cpu);
} else {
wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
printk(KERN_INFO "CPU#%d: Thermal monitoring enabled\n", cpu);
}
/* check wether a vector already exists */
l = apic_read(APIC_LVTTHMR);
if (l & 0xff) {
printk(KERN_DEBUG "CPU#%d: Thermal LVT already handled\n", cpu);
return; /* -EBUSY */
}
wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
printk(KERN_INFO "CPU#%d: Thermal monitoring enabled\n", cpu);
/* The temperature transition interrupt handler setup */
l = THERMAL_APIC_VECTOR; /* our delivery vector */
l |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */
apic_write_around(APIC_LVTTHMR, l);
rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x3 , h);
/* ok we're good to go... */
vendor_thermal_interrupt = intel_thermal_interrupt;
l = apic_read(APIC_LVTTHMR);
apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
return;
#endif
}
/*
* Machine Check Handler For PII/PIII
*/
static void intel_machine_check(struct pt_regs * regs, long error_code) static void intel_machine_check(struct pt_regs * regs, long error_code)
{ {
...@@ -20,7 +132,9 @@ static void intel_machine_check(struct pt_regs * regs, long error_code) ...@@ -20,7 +132,9 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
u32 alow, ahigh, high, low; u32 alow, ahigh, high, low;
u32 mcgstl, mcgsth; u32 mcgstl, mcgsth;
int i; int i;
flush_all();
rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
if(mcgstl&(1<<0)) /* Recoverable ? */ if(mcgstl&(1<<0)) /* Recoverable ? */
recover=0; recover=0;
...@@ -41,13 +155,12 @@ static void intel_machine_check(struct pt_regs * regs, long error_code) ...@@ -41,13 +155,12 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
if(high&(1<<27)) if(high&(1<<27))
{ {
rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
printk("[%08x%08x]", alow, ahigh); printk("[%08x%08x]", ahigh, alow);
} }
if(high&(1<<26)) if(high&(1<<26))
{ {
rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
printk(" at %08x%08x", printk(" at %08x%08x", ahigh, alow);
ahigh, alow);
} }
printk("\n"); printk("\n");
/* Clear it */ /* Clear it */
...@@ -109,8 +222,55 @@ asmlinkage void do_machine_check(struct pt_regs * regs, long error_code) ...@@ -109,8 +222,55 @@ asmlinkage void do_machine_check(struct pt_regs * regs, long error_code)
machine_check_vector(regs, error_code); machine_check_vector(regs, error_code);
} }
#ifdef CONFIG_X86_MCE_NONFATAL
struct timer_list mce_timer;
static void mce_checkregs (unsigned int cpu)
{
u32 low, high;
int i;
if (cpu!=smp_processor_id())
BUG();
for (i=0; i<banks; i++) {
rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
if ((low | high) != 0) {
flush_all();
printk (KERN_EMERG "MCE: The hardware reports a non fatal, correctable incident occured on CPU %d.\n", smp_processor_id());
printk (KERN_EMERG "Bank %d: %08x%08x\n", i, high, low);
/* Scrub the error so we don't pick it up in 5 seconds time. */
wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
/* Serialize */
wmb();
}
}
/* Refresh the timer. */
mce_timer.expires = jiffies + 5 * HZ;
add_timer (&mce_timer);
}
static void mce_timerfunc (unsigned long data)
{
int i;
for (i=0; i<smp_num_cpus; i++) {
if (i == smp_processor_id())
mce_checkregs(i);
else
smp_call_function (mce_checkregs, i, 1, 1);
}
}
#endif
/* /*
* Set up machine check reporting for Intel processors * Set up machine check reporting for processors with Intel style MCE
*/ */
static void __init intel_mcheck_init(struct cpuinfo_x86 *c) static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
...@@ -164,19 +324,27 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c) ...@@ -164,19 +324,27 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
if(done==0) if(done==0)
printk(KERN_INFO "Intel machine check architecture supported.\n"); printk(KERN_INFO "Intel machine check architecture supported.\n");
rdmsr(MSR_IA32_MCG_CAP, l, h); rdmsr(MSR_IA32_MCG_CAP, l, h);
if(l&(1<<8)) if(l&(1<<8)) /* Control register present ? */
wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
banks = l&0xff; banks = l&0xff;
for(i=1;i<banks;i++)
{ /* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */
wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); if (c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 6) {
for(i=1; i<banks; i++)
wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
} else {
for(i=0; i<banks; i++)
wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
} }
for(i=0;i<banks;i++)
{ for(i=0; i<banks; i++)
wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
}
set_in_cr4(X86_CR4_MCE); set_in_cr4(X86_CR4_MCE);
printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", smp_processor_id()); printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", smp_processor_id());
intel_init_thermal(c);
done=1; done=1;
} }
...@@ -206,28 +374,36 @@ static void __init winchip_mcheck_init(struct cpuinfo_x86 *c) ...@@ -206,28 +374,36 @@ static void __init winchip_mcheck_init(struct cpuinfo_x86 *c)
* This has to be run for each processor * This has to be run for each processor
*/ */
void __init mcheck_init(struct cpuinfo_x86 *c) void __init mcheck_init(struct cpuinfo_x86 *c)
{ {
if(mce_disabled==1) if(mce_disabled==1)
return; return;
switch(c->x86_vendor) switch(c->x86_vendor)
{ {
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
/* /* AMD K7 machine check is Intel like */
* AMD K7 machine check is Intel like if(c->x86 == 6) {
*/
if(c->x86 == 6)
intel_mcheck_init(c); intel_mcheck_init(c);
#ifdef CONFIG_X86_MCE_NONFATAL
/* Set the timer to check for non-fatal errors every 5 seconds */
init_timer (&mce_timer);
mce_timer.expires = jiffies + 5 * HZ;
mce_timer.data = 0;
mce_timer.function = &mce_timerfunc;
add_timer (&mce_timer);
#endif
}
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
intel_mcheck_init(c); intel_mcheck_init(c);
break; break;
case X86_VENDOR_CENTAUR: case X86_VENDOR_CENTAUR:
winchip_mcheck_init(c); winchip_mcheck_init(c);
break; break;
default: default:
break; break;
} }
...@@ -247,3 +423,9 @@ static int __init mcheck_enable(char *str) ...@@ -247,3 +423,9 @@ static int __init mcheck_enable(char *str)
__setup("nomce", mcheck_disable); __setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable); __setup("mce", mcheck_enable);
#else
asmlinkage void do_machine_check(struct pt_regs * regs, long error_code) {}
asmlinkage void smp_thermal_interrupt(struct pt_regs regs) {}
void __init mcheck_init(struct cpuinfo_x86 *c) {}
#endif
...@@ -71,6 +71,7 @@ ...@@ -71,6 +71,7 @@
#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
#define SET_APIC_DEST_FIELD(x) ((x)<<24) #define SET_APIC_DEST_FIELD(x) ((x)<<24)
#define APIC_LVTT 0x320 #define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340 #define APIC_LVTPC 0x340
#define APIC_LVT0 0x350 #define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) #define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
...@@ -280,7 +281,16 @@ struct local_apic { ...@@ -280,7 +281,16 @@ struct local_apic {
u32 __reserved_4[3]; u32 __reserved_4[3];
} lvt_timer; } lvt_timer;
/*330*/ struct { u32 __reserved[4]; } __reserved_15; /*330*/ struct { /* LVT - Thermal Sensor */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_thermal;
/*340*/ struct { /* LVT - Performance Counter */ /*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8, u32 vector : 8,
......
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#define RESCHEDULE_VECTOR 0xfc #define RESCHEDULE_VECTOR 0xfc
#define CALL_FUNCTION_VECTOR 0xfb #define CALL_FUNCTION_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xf0
/* /*
* Local APIC timer IRQ vector is on a different priority level, * Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ * to work around the 'lost local interrupt if more than 2 IRQ
......
...@@ -57,8 +57,13 @@ ...@@ -57,8 +57,13 @@
#define MSR_IA32_MCG_STATUS 0x17a #define MSR_IA32_MCG_STATUS 0x17a
#define MSR_IA32_MCG_CTL 0x17b #define MSR_IA32_MCG_CTL 0x17b
#define MSR_IA32_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL0 0x186
#define MSR_IA32_EVNTSEL1 0x187 #define MSR_P6_EVNTSEL1 0x187
#define MSR_IA32_THERM_CONTROL 0x19a
#define MSR_IA32_THERM_INTERRUPT 0x19b
#define MSR_IA32_THERM_STATUS 0x19c
#define MSR_IA32_MISC_ENABLE 0x1a0
#define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_DEBUGCTLMSR 0x1d9
#define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHFROMIP 0x1db
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment