Commit 89e7eb09 authored by Linus Torvalds

Merge branch 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS updates from Ingo Molnar:
 "The biggest change in this cycle was an enhancement by Yazen Ghannam
  to reduce the number of MCE error injection related IPIs.

  The rest are smaller fixes"

* 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Fix mce_rdmsrl() warning message
  x86/RAS/AMD: Reduce the number of IPIs when prepping error injection
  x86/mce/AMD: Increase size of the bank_map type
  x86/mce: Do not use bank 1 for APEI generated error logs
parents c86ad14d 38c54ccb
@@ -46,7 +46,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
 		return;
 
 	mce_setup(&m);
-	m.bank = 1;
+	m.bank = -1;
 	/* Fake a memory read error with unknown channel */
 	m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
...
@@ -425,7 +425,7 @@ static u64 mce_rdmsrl(u32 msr)
 	}
 
 	if (rdmsrl_safe(msr, &v)) {
-		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
 		/*
 		 * Return zero in case the access faulted. This should
 		 * not happen normally but can happen if the CPU does
...
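The mce_rdmsrl() change is about readability of the warning: MSR numbers are conventionally quoted in hex, and the interesting ones (e.g. AMD's SMCA registers) sit in the 0xC000_xxxx range, where printing a u32 with "%d" produces a meaningless negative-looking decimal. A minimal userspace sketch of the difference, with 0xc0002001 used purely as an example MSR number (on common implementations the first line prints -1073733631, the second 0xc0002001):

	#include <stdio.h>

	int main(void)
	{
		unsigned int msr = 0xc0002001;	/* example high MSR number */

		/* old format string: the high bit is reinterpreted as a sign bit */
		printf("mce: Unable to read msr %d!\n", msr);
		/* new format string: matches how MSRs are listed in the manuals */
		printf("mce: Unable to read MSR 0x%x!\n", msr);
		return 0;
	}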
@@ -93,7 +93,7 @@ const char * const amd_df_mcablock_names[] = {
 EXPORT_SYMBOL_GPL(amd_df_mcablock_names);
 
 static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
-static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
+static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */
 
 static void amd_threshold_interrupt(void);
 static void amd_deferred_error_interrupt(void);
...
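bank_map is a per-CPU bitmask with one bit per MCA bank that has thresholding blocks, set roughly as per_cpu(bank_map, cpu) |= (1 << bank). An unsigned char can only record banks 0-7, and newer AMD parts expose more banks than that, so bits for the higher banks were silently dropped. A standalone sketch of that failure mode (bank number 10 is just an illustrative value):

	#include <stdio.h>

	int main(void)
	{
		unsigned char map8  = 0;	/* old type: room for banks 0-7 only */
		unsigned int  map32 = 0;	/* new type: room for up to 32 banks */
		unsigned int  bank  = 10;	/* a bank number >= 8 */

		map8  |= 1 << bank;		/* bit 10 is truncated away: map8 stays 0 */
		map32 |= 1 << bank;		/* bit 10 is recorded */

		printf("8-bit map: %#x, 32-bit map: %#x\n", map8, map32);
		return 0;
	}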
@@ -241,6 +241,31 @@ static void toggle_nb_mca_mst_cpu(u16 nid)
 			 __func__, PCI_FUNC(F3->devfn), NBCFG);
 }
 
+static void prepare_msrs(void *info)
+{
+	struct mce i_mce = *(struct mce *)info;
+	u8 b = i_mce.bank;
+
+	wrmsrl(MSR_IA32_MCG_STATUS, i_mce.mcgstatus);
+
+	if (boot_cpu_has(X86_FEATURE_SMCA)) {
+		if (i_mce.inject_flags == DFR_INT_INJ) {
+			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), i_mce.status);
+			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), i_mce.addr);
+		} else {
+			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), i_mce.status);
+			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), i_mce.addr);
+		}
+
+		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), i_mce.misc);
+	} else {
+		wrmsrl(MSR_IA32_MCx_STATUS(b), i_mce.status);
+		wrmsrl(MSR_IA32_MCx_ADDR(b), i_mce.addr);
+		wrmsrl(MSR_IA32_MCx_MISC(b), i_mce.misc);
+	}
+}
+
 static void do_inject(void)
 {
 	u64 mcg_status = 0;
@@ -287,36 +312,9 @@ static void do_inject(void)
 
 	toggle_hw_mce_inject(cpu, true);
 
-	wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
-		     (u32)mcg_status, (u32)(mcg_status >> 32));
-
-	if (boot_cpu_has(X86_FEATURE_SMCA)) {
-		if (inj_type == DFR_INT_INJ) {
-			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DESTAT(b),
-				     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DEADDR(b),
-				     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-		} else {
-			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_STATUS(b),
-				     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_ADDR(b),
-				     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-		}
-
-		wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(b),
-			     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-	} else {
-		wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
-			     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-		wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
-			     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-		wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
-			     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-	}
+	i_mce.mcgstatus = mcg_status;
+	i_mce.inject_flags = inj_type;
+	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
 
 	toggle_hw_mce_inject(cpu, false);
...
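The win in the injector change comes from how the remote MSR writes are issued: each wrmsr_on_cpu() call is a separate cross-CPU function call, which normally means a separate IPI to the target CPU, so the old do_inject() path cost several IPIs per injection. The rewrite stashes the values in the struct mce and sends it once with smp_call_function_single(), after which prepare_msrs() performs all the wrmsrl() writes locally on the target CPU, i.e. one IPI per injection. A rough sketch of the pattern, not the kernel code itself: struct remote_state, apply_state_local() and MSR_A/B/C below are made-up illustrative names, while smp_call_function_single(), wrmsr_on_cpu() and wrmsrl() are the real interfaces.

	/* Sketch: batch remote register writes into one cross-CPU call. */
	struct remote_state {
		u64 a, b, c;
	};

	static void apply_state_local(void *info)
	{
		struct remote_state *s = info;

		/* Runs on the target CPU: each write is now a local MSR write. */
		wrmsrl(MSR_A, s->a);	/* MSR_A/B/C stand in for real MSR numbers */
		wrmsrl(MSR_B, s->b);
		wrmsrl(MSR_C, s->c);
	}

	static void apply_state(int cpu, struct remote_state *s)
	{
		/* One IPI for the whole batch ... */
		smp_call_function_single(cpu, apply_state_local, s, 1);

		/* ... instead of one IPI per register:
		 *	wrmsr_on_cpu(cpu, MSR_A, lo, hi);
		 *	wrmsr_on_cpu(cpu, MSR_B, lo, hi);
		 *	wrmsr_on_cpu(cpu, MSR_C, lo, hi);
		 */
	}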