Commit 5b828263 authored by Linus Torvalds

Merge tag 'ras_core_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 RAS updates from Borislav Petkov:

 - Simplification of the AMD MCE error severity grading logic, plus
   accompanying error messages for critical panic MCEs to make diagnostics
   more human-friendly.

 - Misc fixes

* tag 'ras_core_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Add messages for panic errors in AMD's MCE grading
  x86/mce: Simplify AMD severity grading logic
  x86/MCE/AMD: Fix memory leak when threshold_create_bank() fails
  x86/mce: Avoid unnecessary padding in struct mce_bank
parents eb39e37d fa619f51
@@ -1294,10 +1294,23 @@ static void threshold_remove_bank(struct threshold_bank *bank)
 	kfree(bank);
 }
 
+static void __threshold_remove_device(struct threshold_bank **bp)
+{
+	unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
+
+	for (bank = 0; bank < numbanks; bank++) {
+		if (!bp[bank])
+			continue;
+
+		threshold_remove_bank(bp[bank]);
+		bp[bank] = NULL;
+	}
+	kfree(bp);
+}
+
 int mce_threshold_remove_device(unsigned int cpu)
 {
 	struct threshold_bank **bp = this_cpu_read(threshold_banks);
-	unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
 
 	if (!bp)
 		return 0;
@@ -1308,13 +1321,7 @@ int mce_threshold_remove_device(unsigned int cpu)
 	 */
 	this_cpu_write(threshold_banks, NULL);
 
-	for (bank = 0; bank < numbanks; bank++) {
-		if (bp[bank]) {
-			threshold_remove_bank(bp[bank]);
-			bp[bank] = NULL;
-		}
-	}
-	kfree(bp);
+	__threshold_remove_device(bp);
 	return 0;
 }
 
@@ -1351,15 +1358,14 @@ int mce_threshold_create_device(unsigned int cpu)
 		if (!(this_cpu_read(bank_map) & (1 << bank)))
 			continue;
 		err = threshold_create_bank(bp, cpu, bank);
-		if (err)
-			goto out_err;
+		if (err) {
+			__threshold_remove_device(bp);
+			return err;
+		}
 	}
 	this_cpu_write(threshold_banks, bp);
 
 	if (thresholding_irq_en)
 		mce_threshold_vector = amd_threshold_interrupt;
 	return 0;
-out_err:
-	mce_threshold_remove_device(cpu);
-	return err;
 }
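The memory leak fix above moves the per-bank teardown into a single helper, __threshold_remove_device(), which both the normal removal path and the creation error path call, so a partially populated bank array is freed exactly like a fully populated one. Below is a standalone sketch of the same pattern with hypothetical slot/resource names rather than the kernel's threshold_bank machinery; it illustrates the design choice, not kernel code.

#include <stdio.h>
#include <stdlib.h>

#define NUM_SLOTS 4

/* Hypothetical per-slot resource standing in for a threshold bank. */
struct slot {
	int id;
};

/* Shared teardown: safe to call on a partially populated array. */
static void destroy_all(struct slot **slots)
{
	for (int i = 0; i < NUM_SLOTS; i++) {
		if (!slots[i])
			continue;
		free(slots[i]);
		slots[i] = NULL;
	}
	free(slots);
}

/* Creation path: on any failure, reuse the same teardown helper. */
static struct slot **create_all(void)
{
	struct slot **slots = calloc(NUM_SLOTS, sizeof(*slots));

	if (!slots)
		return NULL;

	for (int i = 0; i < NUM_SLOTS; i++) {
		slots[i] = malloc(sizeof(*slots[i]));
		if (!slots[i]) {
			destroy_all(slots);	/* frees whatever was built so far */
			return NULL;
		}
		slots[i]->id = i;
	}
	return slots;
}

int main(void)
{
	struct slot **slots = create_all();

	if (!slots)
		return 1;

	printf("created %d slots\n", NUM_SLOTS);
	destroy_all(slots);	/* normal removal uses the same helper */
	return 0;
}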
@@ -69,7 +69,9 @@ DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
 
 struct mce_bank {
 	u64		ctl;			/* subevents to enable */
-	bool		init;			/* initialise bank? */
+
+	__u64 init		: 1,		/* initialise bank? */
+	      __reserved_1	: 63;
 };
 static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
 
...
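For the struct mce_bank change: on x86_64 a lone bool after a u64 leaves seven bytes of tail padding, while folding the flag into a u64 bitfield keeps the struct the same 16-byte size and turns that dead space into named, reserved bits that future flags can use. A small standalone sketch (hypothetical struct names, assuming the usual x86_64 alignment rules):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Old-style layout: a lone bool after a u64 leaves tail padding. */
struct bank_bool {
	uint64_t ctl;		/* subevents to enable */
	bool     init;		/* initialise bank? */
	/* 7 bytes of unnamed tail padding on x86_64 */
};

/* New-style layout: same size, but the spare bits are named and reusable. */
struct bank_bitfield {
	uint64_t ctl;			/* subevents to enable */
	uint64_t init     : 1,		/* initialise bank? */
	         reserved : 63;		/* free for future flags */
};

int main(void)
{
	printf("bool layout:     %zu bytes\n", sizeof(struct bank_bool));	/* 16 */
	printf("bitfield layout: %zu bytes\n", sizeof(struct bank_bitfield));	/* 16 */
	return 0;
}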
@@ -301,85 +301,65 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
 	}
 }
 
-static __always_inline int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
+/* See AMD PPR(s) section Machine Check Error Handling. */
+static noinstr int mce_severity_amd(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp)
 {
-	u64 mcx_cfg;
+	char *panic_msg = NULL;
+	int ret;
 
 	/*
-	 * We need to look at the following bits:
-	 * - "succor" bit (data poisoning support), and
-	 * - TCC bit (Task Context Corrupt)
-	 * in MCi_STATUS to determine error severity.
+	 * Default return value: Action required, the error must be handled
+	 * immediately.
 	 */
-	if (!mce_flags.succor)
-		return MCE_PANIC_SEVERITY;
-
-	mcx_cfg = mce_rdmsrl(MSR_AMD64_SMCA_MCx_CONFIG(m->bank));
-
-	/* TCC (Task context corrupt). If set and if IN_KERNEL, panic. */
-	if ((mcx_cfg & MCI_CONFIG_MCAX) &&
-	    (m->status & MCI_STATUS_TCC) &&
-	    (err_ctx == IN_KERNEL))
-		return MCE_PANIC_SEVERITY;
-
-	/* ...otherwise invoke hwpoison handler. */
-	return MCE_AR_SEVERITY;
-}
-
-/*
- * See AMD Error Scope Hierarchy table in a newer BKDG. For example
- * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
- */
-static noinstr int mce_severity_amd(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp)
-{
-	enum context ctx = error_context(m, regs);
+	ret = MCE_AR_SEVERITY;
 
 	/* Processor Context Corrupt, no need to fumble too much, die! */
-	if (m->status & MCI_STATUS_PCC)
-		return MCE_PANIC_SEVERITY;
+	if (m->status & MCI_STATUS_PCC) {
+		panic_msg = "Processor Context Corrupt";
+		ret = MCE_PANIC_SEVERITY;
+		goto out;
+	}
 
-	if (m->status & MCI_STATUS_UC) {
-
-		if (ctx == IN_KERNEL)
-			return MCE_PANIC_SEVERITY;
+	if (m->status & MCI_STATUS_DEFERRED) {
+		ret = MCE_DEFERRED_SEVERITY;
+		goto out;
+	}
 
-		/*
-		 * On older systems where overflow_recov flag is not present, we
-		 * should simply panic if an error overflow occurs. If
-		 * overflow_recov flag is present and set, then software can try
-		 * to at least kill process to prolong system operation.
-		 */
-		if (mce_flags.overflow_recov) {
-			if (mce_flags.smca)
-				return mce_severity_amd_smca(m, ctx);
-
-			/* kill current process */
-			return MCE_AR_SEVERITY;
-		} else {
-			/* at least one error was not logged */
-			if (m->status & MCI_STATUS_OVER)
-				return MCE_PANIC_SEVERITY;
-		}
-
-		/*
-		 * For any other case, return MCE_UC_SEVERITY so that we log the
-		 * error and exit #MC handler.
-		 */
-		return MCE_UC_SEVERITY;
-	}
+	/*
+	 * If the UC bit is not set, the system either corrected or deferred
+	 * the error. No action will be required after logging the error.
+	 */
+	if (!(m->status & MCI_STATUS_UC)) {
+		ret = MCE_KEEP_SEVERITY;
+		goto out;
+	}
 
-	/*
-	 * deferred error: poll handler catches these and adds to mce_ring so
-	 * memory-failure can take recovery actions.
-	 */
-	if (m->status & MCI_STATUS_DEFERRED)
-		return MCE_DEFERRED_SEVERITY;
+	/*
+	 * On MCA overflow, without the MCA overflow recovery feature the
+	 * system will not be able to recover, panic.
+	 */
+	if ((m->status & MCI_STATUS_OVER) && !mce_flags.overflow_recov) {
+		panic_msg = "Overflowed uncorrected error without MCA Overflow Recovery";
+		ret = MCE_PANIC_SEVERITY;
+		goto out;
+	}
 
-	/*
-	 * corrected error: poll handler catches these and passes responsibility
-	 * of decoding the error to EDAC
-	 */
-	return MCE_KEEP_SEVERITY;
+	if (!mce_flags.succor) {
+		panic_msg = "Uncorrected error without MCA Recovery";
+		ret = MCE_PANIC_SEVERITY;
+		goto out;
+	}
+
+	if (error_context(m, regs) == IN_KERNEL) {
+		panic_msg = "Uncorrected unrecoverable error in kernel context";
+		ret = MCE_PANIC_SEVERITY;
+	}
+
+out:
+	if (msg && panic_msg)
+		*msg = panic_msg;
+
+	return ret;
 }
 
 static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp)
...
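The rewritten AMD grader walks the status bits in one fixed order (Processor Context Corrupt, deferred, corrected, overflow without recovery, missing recovery/succor support, kernel context) and funnels every outcome through a single exit that publishes the panic message. Below is a standalone sketch of that decision order, with plain booleans standing in for the MCA status bits and CPU feature flags; names and enum values are illustrative, not the kernel's.

#include <stdio.h>

/* Severity levels mirroring the order used above (values illustrative). */
enum severity { KEEP, DEFERRED, ACTION_REQUIRED, PANIC };

struct status {
	int pcc;	/* Processor Context Corrupt */
	int deferred;	/* deferred error */
	int uc;		/* uncorrected */
	int over;	/* overflow */
};

struct features {
	int overflow_recov;
	int succor;
};

/*
 * Same decision order as the new mce_severity_amd(): PCC, deferred,
 * corrected, overflow without recovery, no succor, kernel context.
 */
static enum severity grade(struct status s, struct features f, int in_kernel,
			   const char **msg)
{
	*msg = NULL;

	if (s.pcc) {
		*msg = "Processor Context Corrupt";
		return PANIC;
	}
	if (s.deferred)
		return DEFERRED;
	if (!s.uc)
		return KEEP;		/* corrected: log it, nothing else */
	if (s.over && !f.overflow_recov) {
		*msg = "Overflowed uncorrected error without MCA Overflow Recovery";
		return PANIC;
	}
	if (!f.succor) {
		*msg = "Uncorrected error without MCA Recovery";
		return PANIC;
	}
	if (in_kernel) {
		*msg = "Uncorrected unrecoverable error in kernel context";
		return PANIC;
	}
	return ACTION_REQUIRED;		/* recoverable: handle immediately */
}

int main(void)
{
	const char *msg;
	struct features f = { .overflow_recov = 1, .succor = 1 };
	struct status uc_user = { .uc = 1 };

	printf("uc in user context -> %d (msg: %s)\n",
	       grade(uc_user, f, 0, &msg), msg ? msg : "none");
	return 0;
}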