Commit 78c06176 authored by Russ Anderson, committed by Ingo Molnar

x86: Enable NMI on all cpus on UV

Enable NMI on all cpus in a UV system and add an NMI handler
to dump_stack on each cpu.

By default on x86, all the cpus except the boot cpu have NMI
masked off.  This patch enables NMI on all cpus in a UV system
and adds an NMI handler to dump_stack on each cpu.  This way,
if a system hangs, we can NMI the machine and get a backtrace
from all the cpus.

Version 2: Use x86_platform driver mechanism for nmi init, per
           Ingo's suggestion.

Version 3: Clean up Ingo's nits.
Signed-off-by: Russ Anderson <rja@sgi.com>
LKML-Reference: <20100226164912.GA24439@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 36028f33
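
The shape of the change, per Ingo's x86_platform suggestion: struct x86_platform_ops
gains an nmi_init hook that defaults to a no-op, UV platform detection swaps in
uv_nmi_init(), and start_secondary() calls the hook as each cpu comes online.
A minimal standalone sketch of that function-pointer override pattern, in plain
userspace C with illustrative demo names rather than the kernel's API:

#include <stdio.h>

/* Platform-ops style struct: one hook per platform-specific action. */
struct platform_ops {
	void (*nmi_init)(void);
};

/* Default hook is a no-op, so generic code can call it unconditionally. */
static void default_nmi_init(void) { }

/* Platform-specific override, analogous to uv_nmi_init(). */
static void demo_uv_nmi_init(void)
{
	printf("UV: unmask NMI on this cpu\n");
}

static struct platform_ops platform = {
	.nmi_init = default_nmi_init,
};

/* Analogous to uv_acpi_madt_oem_check(): detection swaps in the override. */
static void detect_platform(int is_uv)
{
	if (is_uv)
		platform.nmi_init = demo_uv_nmi_init;
}

/* Analogous to start_secondary(): generic code just calls the hook. */
static void bring_up_cpu(void)
{
	platform.nmi_init();
}

int main(void)
{
	detect_platform(1);
	bring_up_cpu();
	return 0;
}
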
@@ -11,6 +11,7 @@ struct mm_struct;
 extern enum uv_system_type get_uv_system_type(void);
 extern int is_uv_system(void);
 extern void uv_cpu_init(void);
+extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 						 struct mm_struct *mm,
...
@@ -126,6 +126,7 @@ struct x86_cpuinit_ops {
  * @get_wallclock:		get time from HW clock like RTC etc.
  * @set_wallclock:		set time back to HW clock
  * @is_untracked_pat_range	exclude from PAT logic
+ * @nmi_init			enable NMI on cpus
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
@@ -133,6 +134,7 @@ struct x86_platform_ops {
 	int (*set_wallclock)(unsigned long nowtime);
 	void (*iommu_shutdown)(void);
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
+	void (*nmi_init)(void);
 };

 extern struct x86_init_ops x86_init;
...
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/kdebug.h>

 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -41,6 +42,7 @@ static enum uv_system_type uv_system_type;
 static u64 gru_start_paddr, gru_end_paddr;
 int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
+static DEFINE_SPINLOCK(uv_nmi_lock);

 static inline bool is_GRU_range(u64 start, u64 end)
 {
@@ -74,6 +76,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	if (!strcmp(oem_id, "SGI")) {
 		nodeid = early_get_nodeid();
 		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
+		x86_platform.nmi_init = uv_nmi_init;
 		if (!strcmp(oem_table_id, "UVL"))
 			uv_system_type = UV_LEGACY_APIC;
 		else if (!strcmp(oem_table_id, "UVX"))
@@ -596,6 +599,46 @@ void __cpuinit uv_cpu_init(void)
 		set_x2apic_extra_bits(uv_hub_info->pnode);
 }

+/*
+ * When NMI is received, print a stack trace.
+ */
+int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
+{
+	if (reason != DIE_NMI_IPI)
+		return NOTIFY_OK;
+
+	/*
+	 * Use a lock so only one cpu prints at a time
+	 * to prevent intermixed output.
+	 */
+	spin_lock(&uv_nmi_lock);
+	pr_info("NMI stack dump cpu %u:\n", smp_processor_id());
+	dump_stack();
+	spin_unlock(&uv_nmi_lock);
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block uv_dump_stack_nmi_nb = {
+	.notifier_call	= uv_handle_nmi
+};
+
+void uv_register_nmi_notifier(void)
+{
+	if (register_die_notifier(&uv_dump_stack_nmi_nb))
+		printk(KERN_WARNING "UV NMI handler failed to register\n");
+}
+
+void uv_nmi_init(void)
+{
+	unsigned int value;
+
+	/*
+	 * Unmask NMI on all cpus
+	 */
+	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
+	value &= ~APIC_LVT_MASKED;
+	apic_write(APIC_LVT1, value);
+}
+
 void __init uv_system_init(void)
 {
@@ -717,6 +760,7 @@ void __init uv_system_init(void)

 	uv_cpu_init();
 	uv_scir_register_cpu_notifier();
+	uv_register_nmi_notifier();
 	proc_mkdir("sgi_uv", NULL);

 	/* register Legacy VGA I/O redirection handler */
...
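
The last addition above, uv_nmi_init(), only reprograms the local APIC's LINT1
entry: OR in NMI delivery mode and clear the mask bit, so an externally asserted
NMI actually reaches the cpu. A standalone restatement of that bit manipulation,
using the constant values I believe <asm/apicdef.h> defines and DEMO_-prefixed
names that are not kernel symbols:

#include <stdio.h>

/* LVT entry bits, as defined (to the best of my knowledge) in <asm/apicdef.h>. */
#define DEMO_APIC_DM_NMI      0x00400u      /* delivery mode field = NMI (bits 8-10) */
#define DEMO_APIC_LVT_MASKED  (1u << 16)    /* bit 16 set = LVT entry masked         */

/* The same transformation uv_nmi_init() applies to the APIC_LVT1 value. */
static unsigned int demo_unmask_lint1_as_nmi(unsigned int lvt1)
{
	lvt1 |= DEMO_APIC_DM_NMI;       /* deliver LINT1 as an NMI         */
	lvt1 &= ~DEMO_APIC_LVT_MASKED;  /* clear the mask bit so it fires  */
	return lvt1;
}

int main(void)
{
	/* Example: a masked LVT1 entry (only the mask bit set). */
	unsigned int before = DEMO_APIC_LVT_MASKED;
	printf("LVT1: 0x%05x -> 0x%05x\n", before, demo_unmask_lint1_as_nmi(before));
	return 0;
}
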
@@ -320,6 +320,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	unlock_vector_lock();
 	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	x86_platform.nmi_init();

 	/* enable local interrupts */
 	local_irq_enable();
...
@@ -76,10 +76,13 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
 };

+static void default_nmi_init(void) { };
+
 struct x86_platform_ops x86_platform = {
 	.calibrate_tsc			= native_calibrate_tsc,
 	.get_wallclock			= mach_get_cmos_time,
 	.set_wallclock			= mach_set_rtc_mmss,
 	.iommu_shutdown			= iommu_shutdown_noop,
 	.is_untracked_pat_range		= is_ISA_range,
+	.nmi_init			= default_nmi_init
 };