Commit 158405e8 authored by Linus Torvalds

Merge tag 'ras_core_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS updates from Borislav Petkov:

 - Get rid of a bunch of function pointers used in MCA land in favor of
   normal functions. This is in preparation for making the MCA code
   noinstr-aware (a sketch of the pattern is included below)

 - When the kernel copies data from a user address and encounters a
   machine check, a SIGBUS is sent to that process. Change this action
   to either an -EFAULT returned to the user or a short write, making
   the recovery action a lot more user-friendly (a sketch of the
   caller-visible effect is included below)
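
   For the first item, the most visible conversion in this pull is the msr_ops
   struct of per-bank function pointers being replaced by the plain lookup
   function mca_msr_reg(), declared in the internal.h hunk below. The following
   is only a minimal sketch of how such a helper can be structured; the dispatch
   on mce_flags.smca and the MSR_IA32_MCx_*() / MSR_AMD64_SMCA_MCx_*() macros
   are assumptions for illustration, not lines taken from this merge:

        /* Sketch only: a direct function standing in for the msr_ops indirect calls. */
        #include <asm/mce.h>            /* MSR_AMD64_SMCA_MCx_*() helpers, mce_flags */
        #include <asm/msr-index.h>      /* MSR_IA32_MCx_*() helpers */
        #include "internal.h"           /* enum mca_msr */

        u32 mca_msr_reg(int bank, enum mca_msr reg)
        {
                /* Scalable MCA CPUs use a different per-bank MSR layout. */
                if (mce_flags.smca) {
                        switch (reg) {
                        case MCA_CTL:           return MSR_AMD64_SMCA_MCx_CTL(bank);
                        case MCA_ADDR:          return MSR_AMD64_SMCA_MCx_ADDR(bank);
                        case MCA_MISC:          return MSR_AMD64_SMCA_MCx_MISC(bank);
                        case MCA_STATUS:        return MSR_AMD64_SMCA_MCx_STATUS(bank);
                        }
                }

                /* Legacy MCA MSR layout. */
                switch (reg) {
                case MCA_CTL:           return MSR_IA32_MCx_CTL(bank);
                case MCA_ADDR:          return MSR_IA32_MCx_ADDR(bank);
                case MCA_MISC:          return MSR_IA32_MCx_MISC(bank);
                case MCA_STATUS:        return MSR_IA32_MCx_STATUS(bank);
                }

                return 0;
        }

   A direct call like this is easier to keep within noinstr constraints (and for
   objtool to verify) than an indirect call through a writable function pointer.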
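
   For the second item, the user-visible effect is that a copy from user space
   which runs into poisoned memory now reports a short copy instead of the task
   being killed from inside the copy path. Below is a minimal sketch of a typical
   caller pattern; read_buf() and its parameters are made up for illustration:

        /* Illustrative only: a made-up copyin helper, not code from this merge. */
        #include <linux/uaccess.h>
        #include <linux/errno.h>

        static int read_buf(void *dst, const void __user *src, size_t len)
        {
                unsigned long left;

                /*
                 * On a machine check in the source pages, copy_from_user()
                 * returns the number of bytes it could not copy; the task no
                 * longer gets a SIGBUS from the copy routine itself.
                 */
                left = copy_from_user(dst, src, len);
                if (left)
                        return -EFAULT; /* or report a short transfer of (len - left) bytes */

                return 0;
        }

   Whether the remainder is turned into -EFAULT or into a short write is up to
   the individual call site, as described above.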

* tag 'ras_core_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Sort mca_config members to get rid of unnecessary padding
  x86/mce: Get rid of the ->quirk_no_way_out() indirect call
  x86/mce: Get rid of msr_ops
  x86/mce: Get rid of machine_check_vector
  x86/mce: Get rid of the mce_severity function pointer
  x86/mce: Drop copyin special case for #MC
  x86/mce: Change to not send SIGBUS error during copy from user
parents 93351d2c 15802468
@@ -205,28 +205,16 @@ struct cper_ia_proc_ctx;
 int mcheck_init(void);
 void mcheck_cpu_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_clear(struct cpuinfo_x86 *c);
-void mcheck_vendor_init_severity(void);
 int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
                                u64 lapic_id);
 #else
 static inline int mcheck_init(void) { return 0; }
 static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
-static inline void mcheck_vendor_init_severity(void) {}
 static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
                                              u64 lapic_id) { return -EINVAL; }
 #endif
 
-#ifdef CONFIG_X86_ANCIENT_MCE
-void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
-void winchip_mcheck_init(struct cpuinfo_x86 *c);
-static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
-#else
-static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
-static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
-static inline void enable_p5_mce(void) {}
-#endif
-
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct device *, mce_device);
@@ -526,7 +526,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high,
         /* Fall back to method we used for older processors: */
         switch (block) {
         case 0:
-                addr = msr_ops.misc(bank);
+                addr = mca_msr_reg(bank, MCA_MISC);
                 break;
         case 1:
                 offset = ((low & MASK_BLKPTR_LO) >> 21);
@@ -978,8 +978,8 @@ static void log_error_deferred(unsigned int bank)
 {
         bool defrd;
 
-        defrd = _log_error_bank(bank, msr_ops.status(bank),
-                                msr_ops.addr(bank), 0);
+        defrd = _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
+                                mca_msr_reg(bank, MCA_ADDR), 0);
         if (!mce_flags.smca)
                 return;
@@ -1009,7 +1009,7 @@ static void amd_deferred_error_interrupt(void)
 static void log_error_thresholding(unsigned int bank, u64 misc)
 {
-        _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
+        _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), mca_msr_reg(bank, MCA_ADDR), misc);
 }
 
 static void log_and_reset_block(struct threshold_block *block)
@@ -1397,7 +1397,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
                 }
         }
 
-        err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+        err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
         if (err)
                 goto out_kobj;
@@ -8,9 +8,6 @@
 #include <linux/device.h>
 #include <asm/mce.h>
 
-/* Pointer to the installed machine check handler for this CPU setup. */
-extern void (*machine_check_vector)(struct pt_regs *);
-
 enum severity_level {
         MCE_NO_SEVERITY,
         MCE_DEFERRED_SEVERITY,
@@ -38,8 +35,7 @@ int mce_gen_pool_add(struct mce *mce);
 int mce_gen_pool_init(void);
 struct llist_node *mce_gen_pool_prepare_records(void);
 
-extern int (*mce_severity)(struct mce *a, struct pt_regs *regs,
-                           int tolerant, char **msg, bool is_excp);
+int mce_severity(struct mce *a, struct pt_regs *regs, int tolerant, char **msg, bool is_excp);
 struct dentry *mce_get_debugfs_dir(void);
 
 extern mce_banks_t mce_banks_ce_disabled;
@@ -117,23 +113,25 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
 #endif
 
 struct mca_config {
-        bool dont_log_ce;
-        bool cmci_disabled;
-        bool ignore_ce;
-        bool print_all;
-
         __u64 lmce_disabled             : 1,
               disabled                  : 1,
               ser                       : 1,
               recovery                  : 1,
               bios_cmci_threshold       : 1,
-              __reserved                : 59;
+              /* Proper #MC exception handler is set */
+              initialized               : 1,
+              __reserved                : 58;
+
+        bool dont_log_ce;
+        bool cmci_disabled;
+        bool ignore_ce;
+        bool print_all;
 
-        s8 bootlog;
         int tolerant;
         int monarch_timeout;
         int panic_timeout;
         u32 rip_msr;
+        s8 bootlog;
 };
 
 extern struct mca_config mca_cfg;
@@ -163,19 +161,28 @@ struct mce_vendor_flags {
         /* AMD-style error thresholding banks present. */
         amd_threshold                   : 1,
 
-        __reserved_0                    : 60;
+        /* Pentium, family 5-style MCA */
+        p5                              : 1,
+
+        /* Centaur Winchip C6-style MCA */
+        winchip                         : 1,
+
+        /* SandyBridge IFU quirk */
+        snb_ifu_quirk                   : 1,
+
+        __reserved_0                    : 57;
 };
 
 extern struct mce_vendor_flags mce_flags;
 
-struct mca_msr_regs {
-        u32 (*ctl)      (int bank);
-        u32 (*status)   (int bank);
-        u32 (*addr)     (int bank);
-        u32 (*misc)     (int bank);
-};
-
-extern struct mca_msr_regs msr_ops;
+enum mca_msr {
+        MCA_CTL,
+        MCA_STATUS,
+        MCA_ADDR,
+        MCA_MISC,
+};
+
+u32 mca_msr_reg(int bank, enum mca_msr reg);
 
 /* Decide whether to add MCE record to MCE event pool or filter it out. */
 extern bool filter_mce(struct mce *m);
@@ -186,4 +193,18 @@ extern bool amd_filter_mce(struct mce *m);
 static inline bool amd_filter_mce(struct mce *m) { return false; }
 #endif
 
+#ifdef CONFIG_X86_ANCIENT_MCE
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
+void winchip_mcheck_init(struct cpuinfo_x86 *c);
+noinstr void pentium_machine_check(struct pt_regs *regs);
+noinstr void winchip_machine_check(struct pt_regs *regs);
+static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
+#else
+static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void enable_p5_mce(void) {}
+static inline void pentium_machine_check(struct pt_regs *regs) {}
+static inline void winchip_machine_check(struct pt_regs *regs) {}
+#endif
+
 #endif /* __X86_MCE_INTERNAL_H__ */
@@ -21,7 +21,7 @@
 int mce_p5_enabled __read_mostly;
 
 /* Machine check handler for Pentium class Intel CPUs: */
-static noinstr void pentium_machine_check(struct pt_regs *regs)
+noinstr void pentium_machine_check(struct pt_regs *regs)
 {
         u32 loaddr, hi, lotype;
@@ -54,10 +54,6 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
         if (!cpu_has(c, X86_FEATURE_MCE))
                 return;
 
-        machine_check_vector = pentium_machine_check;
-        /* Make sure the vector pointer is visible before we enable MCEs: */
-        wmb();
-
         /* Read registers before enabling: */
         rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
         rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
@@ -407,15 +407,14 @@ static int mce_severity_intel(struct mce *m, struct pt_regs *regs,
         }
 }
 
-/* Default to mce_severity_intel */
-int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) =
-    mce_severity_intel;
-
-void __init mcheck_vendor_init_severity(void)
+int mce_severity(struct mce *m, struct pt_regs *regs, int tolerant, char **msg,
+                 bool is_excp)
 {
         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
-                mce_severity = mce_severity_amd;
+                return mce_severity_amd(m, regs, tolerant, msg, is_excp);
+        else
+                return mce_severity_intel(m, regs, tolerant, msg, is_excp);
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -17,7 +17,7 @@
 #include "internal.h"
 
 /* Machine check handler for WinChip C6: */
-static noinstr void winchip_machine_check(struct pt_regs *regs)
+noinstr void winchip_machine_check(struct pt_regs *regs)
 {
         instrumentation_begin();
         pr_emerg("CPU0: Machine Check Exception.\n");
@@ -30,10 +30,6 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
 {
         u32 lo, hi;
 
-        machine_check_vector = winchip_machine_check;
-        /* Make sure the vector pointer is visible before we enable MCEs: */
-        wmb();
-
         rdmsr(MSR_IDT_FCR1, lo, hi);
         lo |= (1<<2);   /* Enable EIERRINT (int 18 MCE) */
         lo &= ~(1<<4);  /* Enable MCE */
@@ -234,24 +234,11 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
  */
 SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
         movl %edx,%ecx
-        cmp $X86_TRAP_MC,%eax           /* check if X86_TRAP_MC */
-        je 3f
 1:      rep movsb
 2:      mov %ecx,%eax
         ASM_CLAC
         ret
 
-        /*
-         * Return zero to pretend that this copy succeeded. This
-         * is counter-intuitive, but needed to prevent the code
-         * in lib/iov_iter.c from retrying and running back into
-         * the poison cache line again. The machine check handler
-         * will ensure that a SIGBUS is sent to the task.
-         */
-3:      xorl %eax,%eax
-        ASM_CLAC
-        ret
-
         _ASM_EXTABLE_CPY(1b, 2b)
 SYM_CODE_END(.Lcopy_user_handle_tail)