Commit 123aa76e authored by Andi Kleen, committed by H. Peter Anvin

x86, mce: don't disable machine checks during code patching

Impact: low priority bug fix

This removes part of a patch I added myself some time ago. After some
consideration the patch was a bad idea. In particular it stopped machine check
exceptions during code patching.

To quote the comment:

        * MCEs only happen when something got corrupted and in this
        * case we must do something about the corruption.
        * Ignoring it is worse than a unlikely patching race.
        * Also machine checks tend to be broadcast and if one CPU
        * goes into machine check the others follow quickly, so we don't
        * expect a machine check to cause undue problems during to code
        * patching.

So undo the machine check related parts of commit 8f4e956b
("x86: stop MCEs and NMIs during code patching"). NMIs are still disabled.

This only removes code, the only additions are a new comment.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 973a2dd1
...@@ -120,8 +120,6 @@ extern void mcheck_init(struct cpuinfo_x86 *c); ...@@ -120,8 +120,6 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
#else #else
#define mcheck_init(c) do { } while (0) #define mcheck_init(c) do { } while (0)
#endif #endif
extern void stop_mce(void);
extern void restart_mce(void);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */ #endif /* _ASM_X86_MCE_H */
...@@ -414,9 +414,17 @@ void __init alternative_instructions(void) ...@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
that might execute the to be patched code. that might execute the to be patched code.
Other CPUs are not running. */ Other CPUs are not running. */
stop_nmi(); stop_nmi();
#ifdef CONFIG_X86_MCE
stop_mce(); /*
#endif * Don't stop machine check exceptions while patching.
* MCEs only happen when something got corrupted and in this
* case we must do something about the corruption.
* Ignoring it is worse than a unlikely patching race.
* Also machine checks tend to be broadcast and if one CPU
* goes into machine check the others follow quickly, so we don't
* expect a machine check to cause undue problems during to code
* patching.
*/
apply_alternatives(__alt_instructions, __alt_instructions_end); apply_alternatives(__alt_instructions, __alt_instructions_end);
...@@ -456,9 +464,6 @@ void __init alternative_instructions(void) ...@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
(unsigned long)__smp_locks_end); (unsigned long)__smp_locks_end);
restart_nmi(); restart_nmi();
#ifdef CONFIG_X86_MCE
restart_mce();
#endif
} }
/** /**
......
...@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c) ...@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
} }
} }
/* CR4 snapshot taken by stop_mce(), consulted by restart_mce(). */
static unsigned long saved_cr4 __initdata;

/*
 * Suppress machine check exceptions: remember the current CR4 and
 * clear the MCE enable bit. Must run before the matching restart_mce().
 */
void __init stop_mce(void)
{
	saved_cr4 = read_cr4();
	clear_in_cr4(X86_CR4_MCE);
}

/*
 * Undo stop_mce(): re-enable machine checks, but only if they were
 * enabled at the time of the snapshot.
 */
void __init restart_mce(void)
{
	if (!(saved_cr4 & X86_CR4_MCE))
		return;
	set_in_cr4(X86_CR4_MCE);
}
static int __init mcheck_disable(char *str) static int __init mcheck_disable(char *str)
{ {
mce_disabled = 1; mce_disabled = 1;
......
...@@ -680,20 +680,6 @@ static struct miscdevice mce_log_device = { ...@@ -680,20 +680,6 @@ static struct miscdevice mce_log_device = {
&mce_chrdev_ops, &mce_chrdev_ops,
}; };
/* CR4 value captured by stop_mce() so restart_mce() can restore it. */
static unsigned long saved_cr4 __initdata;

/*
 * Temporarily disable machine check exceptions by clearing CR4.MCE,
 * saving the previous CR4 contents first.
 */
void __init stop_mce(void)
{
	saved_cr4 = read_cr4();
	clear_in_cr4(X86_CR4_MCE);
}

/*
 * Re-enable machine checks iff the MCE bit was set when stop_mce()
 * took its snapshot; otherwise leave CR4 untouched.
 */
void __init restart_mce(void)
{
	if (!(saved_cr4 & X86_CR4_MCE))
		return;
	set_in_cr4(X86_CR4_MCE);
}
/* /*
* Old style boot options parsing. Only for compatibility. * Old style boot options parsing. Only for compatibility.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.