Commit b831ef2c authored by Linus Torvalds

Merge branch 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS changes from Ingo Molnar:
 "The main system reliability related changes were from x86, but also
  some generic RAS changes:

   - AMD MCE error injection subsystem enhancements.  (Aravind
     Gopalakrishnan)

   - Fix MCE and CPU hotplug interaction bug.  (Ashok Raj)

   - kcrash bootup robustness fix.  (Baoquan He)

   - kcrash cleanups.  (Borislav Petkov)

   - x86 microcode driver rework: simplify it by unmodularizing it and
     other cleanups.  (Borislav Petkov)"

* 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  x86/mce: Add a default case to the switch in __mcheck_cpu_ancient_init()
  x86/mce: Add a Scalable MCA vendor flags bit
  MAINTAINERS: Unify the microcode driver section
  x86/microcode/intel: Move #ifdef DEBUG inside the function
  x86/microcode/amd: Remove maintainers from comments
  x86/microcode: Remove modularization leftovers
  x86/microcode: Merge the early microcode loader
  x86/microcode: Unmodularize the microcode driver
  x86/mce: Fix thermal throttling reporting after kexec
  kexec/crash: Say which char is the unrecognized
  x86/setup/crash: Check memblock_reserve() retval
  x86/setup/crash: Cleanup some more
  x86/setup/crash: Remove alignment variable
  x86/setup: Cleanup crashkernel reservation functions
  x86/amd_nb, EDAC: Rename amd_get_node_id()
  x86/setup: Do not reserve crashkernel high memory if low reservation failed
  x86/microcode/amd: Do not overwrite final patch levels
  x86/microcode/amd: Extract current patch level read to a function
  x86/ras/mce_amd_inj: Inject bank 4 errors on the NBC
  x86/ras/mce_amd_inj: Trigger deferred and thresholding errors interrupts
  ...
parents b02ac6b1 dc34bdd2
...@@ -660,11 +660,6 @@ F: drivers/gpu/drm/radeon/radeon_kfd.c ...@@ -660,11 +660,6 @@ F: drivers/gpu/drm/radeon/radeon_kfd.c
F: drivers/gpu/drm/radeon/radeon_kfd.h F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h F: include/uapi/linux/kfd_ioctl.h
AMD MICROCODE UPDATE SUPPORT
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/amd*
AMD XGBE DRIVER AMD XGBE DRIVER
M: Tom Lendacky <thomas.lendacky@amd.com> M: Tom Lendacky <thomas.lendacky@amd.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
...@@ -5468,12 +5463,6 @@ W: https://01.org/linux-acpi ...@@ -5468,12 +5463,6 @@ W: https://01.org/linux-acpi
S: Supported S: Supported
F: drivers/platform/x86/intel_menlow.c F: drivers/platform/x86/intel_menlow.c
INTEL IA32 MICROCODE UPDATE SUPPORT
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/core*
F: arch/x86/kernel/cpu/microcode/intel*
INTEL I/OAT DMA DRIVER INTEL I/OAT DMA DRIVER
M: Dave Jiang <dave.jiang@intel.com> M: Dave Jiang <dave.jiang@intel.com>
R: Dan Williams <dan.j.williams@intel.com> R: Dan Williams <dan.j.williams@intel.com>
...@@ -11505,6 +11494,11 @@ L: linux-edac@vger.kernel.org ...@@ -11505,6 +11494,11 @@ L: linux-edac@vger.kernel.org
S: Maintained S: Maintained
F: arch/x86/kernel/cpu/mcheck/* F: arch/x86/kernel/cpu/mcheck/*
X86 MICROCODE UPDATE SUPPORT
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/*
X86 VDSO X86 VDSO
M: Andy Lutomirski <luto@amacapital.net> M: Andy Lutomirski <luto@amacapital.net>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
......
...@@ -1123,8 +1123,10 @@ config X86_REBOOTFIXUPS ...@@ -1123,8 +1123,10 @@ config X86_REBOOTFIXUPS
Say N otherwise. Say N otherwise.
config MICROCODE config MICROCODE
tristate "CPU microcode loading support" bool "CPU microcode loading support"
default y
depends on CPU_SUP_AMD || CPU_SUP_INTEL depends on CPU_SUP_AMD || CPU_SUP_INTEL
depends on BLK_DEV_INITRD
select FW_LOADER select FW_LOADER
---help--- ---help---
...@@ -1166,24 +1168,6 @@ config MICROCODE_OLD_INTERFACE ...@@ -1166,24 +1168,6 @@ config MICROCODE_OLD_INTERFACE
def_bool y def_bool y
depends on MICROCODE depends on MICROCODE
config MICROCODE_INTEL_EARLY
bool
config MICROCODE_AMD_EARLY
bool
config MICROCODE_EARLY
bool "Early load microcode"
depends on MICROCODE=y && BLK_DEV_INITRD
select MICROCODE_INTEL_EARLY if MICROCODE_INTEL
select MICROCODE_AMD_EARLY if MICROCODE_AMD
default y
help
This option provides functionality to read additional microcode data
at the beginning of initrd image. The data tells kernel to load
microcode to CPU's as early as possible. No functional change if no
microcode data is glued to the initrd, therefore it's safe to say Y.
config X86_MSR config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support" tristate "/dev/cpu/*/msr - Model-specific register support"
---help--- ---help---
......
...@@ -81,7 +81,7 @@ static inline struct amd_northbridge *node_to_amd_nb(int node) ...@@ -81,7 +81,7 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
} }
static inline u16 amd_get_node_id(struct pci_dev *pdev) static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
{ {
struct pci_dev *misc; struct pci_dev *misc;
int i; int i;
......
...@@ -123,19 +123,27 @@ struct mca_config { ...@@ -123,19 +123,27 @@ struct mca_config {
}; };
struct mce_vendor_flags { struct mce_vendor_flags {
/* /*
* overflow recovery cpuid bit indicates that overflow * Indicates that overflow conditions are not fatal, when set.
* conditions are not fatal */
*/ __u64 overflow_recov : 1,
__u64 overflow_recov : 1,
/*
/* * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
* SUCCOR stands for S/W UnCorrectable error COntainment * Recovery. It indicates support for data poisoning in HW and deferred
* and Recovery. It indicates support for data poisoning * error interrupts.
* in HW and deferred error interrupts. */
*/ succor : 1,
succor : 1,
__reserved_0 : 62; /*
* (AMD) SMCA: This bit indicates support for Scalable MCA which expands
* the register space for each MCA bank and also increases number of
* banks. Also, to accommodate the new banks and registers, the MCA
* register space is moved to a new MSR range.
*/
smca : 1,
__reserved_0 : 61;
}; };
extern struct mce_vendor_flags mce_flags; extern struct mce_vendor_flags mce_flags;
......
...@@ -27,7 +27,6 @@ struct cpu_signature { ...@@ -27,7 +27,6 @@ struct cpu_signature {
struct device; struct device;
enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
extern bool dis_ucode_ldr;
struct microcode_ops { struct microcode_ops {
enum ucode_state (*request_microcode_user) (int cpu, enum ucode_state (*request_microcode_user) (int cpu,
...@@ -55,6 +54,12 @@ struct ucode_cpu_info { ...@@ -55,6 +54,12 @@ struct ucode_cpu_info {
}; };
extern struct ucode_cpu_info ucode_cpu_info[]; extern struct ucode_cpu_info ucode_cpu_info[];
#ifdef CONFIG_MICROCODE
int __init microcode_init(void);
#else
static inline int __init microcode_init(void) { return 0; };
#endif
#ifdef CONFIG_MICROCODE_INTEL #ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void); extern struct microcode_ops * __init init_intel_microcode(void);
#else #else
...@@ -75,7 +80,6 @@ static inline struct microcode_ops * __init init_amd_microcode(void) ...@@ -75,7 +80,6 @@ static inline struct microcode_ops * __init init_amd_microcode(void)
static inline void __exit exit_amd_microcode(void) {} static inline void __exit exit_amd_microcode(void) {}
#endif #endif
#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128 #define MAX_UCODE_COUNT 128
#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
...@@ -150,22 +154,18 @@ static inline unsigned int x86_model(unsigned int sig) ...@@ -150,22 +154,18 @@ static inline unsigned int x86_model(unsigned int sig)
return model; return model;
} }
#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void); extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void); extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void); extern int __init save_microcode_in_initrd(void);
void reload_early_microcode(void); void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
#else #else
static inline void __init load_ucode_bsp(void) {} static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) {} static inline void load_ucode_ap(void) { }
static inline int __init save_microcode_in_initrd(void) static inline int __init save_microcode_in_initrd(void) { return 0; }
{ static inline void reload_early_microcode(void) { }
return 0; static inline bool
} get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
static inline void reload_early_microcode(void) {}
static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
return false;
}
#endif #endif
#endif /* _ASM_X86_MICROCODE_H */ #endif /* _ASM_X86_MICROCODE_H */
...@@ -64,7 +64,7 @@ extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, s ...@@ -64,7 +64,7 @@ extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, s
#define PATCH_MAX_SIZE PAGE_SIZE #define PATCH_MAX_SIZE PAGE_SIZE
extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
#ifdef CONFIG_MICROCODE_AMD_EARLY #ifdef CONFIG_MICROCODE_AMD
extern void __init load_ucode_amd_bsp(unsigned int family); extern void __init load_ucode_amd_bsp(unsigned int family);
extern void load_ucode_amd_ap(void); extern void load_ucode_amd_ap(void);
extern int __init save_microcode_in_initrd_amd(void); extern int __init save_microcode_in_initrd_amd(void);
...@@ -76,4 +76,5 @@ static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } ...@@ -76,4 +76,5 @@ static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
void reload_ucode_amd(void) {} void reload_ucode_amd(void) {}
#endif #endif
extern bool check_current_patch_level(u32 *rev, bool early);
#endif /* _ASM_X86_MICROCODE_AMD_H */ #endif /* _ASM_X86_MICROCODE_AMD_H */
...@@ -57,7 +57,7 @@ extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev); ...@@ -57,7 +57,7 @@ extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
extern int microcode_sanity_check(void *mc, int print_err); extern int microcode_sanity_check(void *mc, int print_err);
extern int find_matching_signature(void *mc, unsigned int csig, int cpf); extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
#ifdef CONFIG_MICROCODE_INTEL_EARLY #ifdef CONFIG_MICROCODE_INTEL
extern void __init load_ucode_intel_bsp(void); extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void); extern void load_ucode_intel_ap(void);
extern void show_ucode_info_early(void); extern void show_ucode_info_early(void);
...@@ -71,13 +71,9 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; ...@@ -71,13 +71,9 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL;
static inline void reload_ucode_intel(void) {} static inline void reload_ucode_intel(void) {}
#endif #endif
#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) #ifdef CONFIG_HOTPLUG_CPU
extern int save_mc_for_early(u8 *mc); extern int save_mc_for_early(u8 *mc);
#else #else
static inline int save_mc_for_early(u8 *mc) static inline int save_mc_for_early(u8 *mc) { return 0; }
{
return 0;
}
#endif #endif
#endif /* _ASM_X86_MICROCODE_INTEL_H */ #endif /* _ASM_X86_MICROCODE_INTEL_H */
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _UAPI_ASM_X86_MCE_H #define _UAPI_ASM_X86_MCE_H
#include <linux/types.h> #include <linux/types.h>
#include <asm/ioctls.h> #include <linux/ioctl.h>
/* Fields are zero when not available */ /* Fields are zero when not available */
struct mce { struct mce {
......
...@@ -1586,6 +1586,8 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) ...@@ -1586,6 +1586,8 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
winchip_mcheck_init(c); winchip_mcheck_init(c);
return 1; return 1;
break; break;
default:
return 0;
} }
return 0; return 0;
...@@ -1605,6 +1607,8 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) ...@@ -1605,6 +1607,8 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
mce_amd_feature_init(c); mce_amd_feature_init(c);
mce_flags.overflow_recov = !!(ebx & BIT(0)); mce_flags.overflow_recov = !!(ebx & BIT(0));
mce_flags.succor = !!(ebx & BIT(1)); mce_flags.succor = !!(ebx & BIT(1));
mce_flags.smca = !!(ebx & BIT(3));
break; break;
} }
...@@ -2042,7 +2046,7 @@ int __init mcheck_init(void) ...@@ -2042,7 +2046,7 @@ int __init mcheck_init(void)
* Disable machine checks on suspend and shutdown. We can't really handle * Disable machine checks on suspend and shutdown. We can't really handle
* them later. * them later.
*/ */
static int mce_disable_error_reporting(void) static void mce_disable_error_reporting(void)
{ {
int i; int i;
...@@ -2052,17 +2056,32 @@ static int mce_disable_error_reporting(void) ...@@ -2052,17 +2056,32 @@ static int mce_disable_error_reporting(void)
if (b->init) if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), 0); wrmsrl(MSR_IA32_MCx_CTL(i), 0);
} }
return 0; return;
}
static void vendor_disable_error_reporting(void)
{
/*
* Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
* Disabling them for just a single offlined CPU is bad, since it will
* inhibit reporting for all shared resources on the socket like the
* last level cache (LLC), the integrated memory controller (iMC), etc.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return;
mce_disable_error_reporting();
} }
static int mce_syscore_suspend(void) static int mce_syscore_suspend(void)
{ {
return mce_disable_error_reporting(); vendor_disable_error_reporting();
return 0;
} }
static void mce_syscore_shutdown(void) static void mce_syscore_shutdown(void)
{ {
mce_disable_error_reporting(); vendor_disable_error_reporting();
} }
/* /*
...@@ -2342,19 +2361,14 @@ static void mce_device_remove(unsigned int cpu) ...@@ -2342,19 +2361,14 @@ static void mce_device_remove(unsigned int cpu)
static void mce_disable_cpu(void *h) static void mce_disable_cpu(void *h)
{ {
unsigned long action = *(unsigned long *)h; unsigned long action = *(unsigned long *)h;
int i;
if (!mce_available(raw_cpu_ptr(&cpu_info))) if (!mce_available(raw_cpu_ptr(&cpu_info)))
return; return;
if (!(action & CPU_TASKS_FROZEN)) if (!(action & CPU_TASKS_FROZEN))
cmci_clear(); cmci_clear();
for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
if (b->init) vendor_disable_error_reporting();
wrmsrl(MSR_IA32_MCx_CTL(i), 0);
}
} }
static void mce_reenable_cpu(void *h) static void mce_reenable_cpu(void *h)
......
...@@ -503,14 +503,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c) ...@@ -503,14 +503,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
return; return;
} }
/* Check whether a vector already exists */
if (h & APIC_VECTOR_MASK) {
printk(KERN_DEBUG
"CPU%d: Thermal LVT vector (%#x) already installed\n",
cpu, (h & APIC_VECTOR_MASK));
return;
}
/* early Pentium M models use different method for enabling TM2 */ /* early Pentium M models use different method for enabling TM2 */
if (cpu_has(c, X86_FEATURE_TM2)) { if (cpu_has(c, X86_FEATURE_TM2)) {
if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) { if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
......
...@@ -2,6 +2,3 @@ microcode-y := core.o ...@@ -2,6 +2,3 @@ microcode-y := core.o
obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_MICROCODE) += microcode.o
microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
microcode-$(CONFIG_MICROCODE_AMD) += amd.o microcode-$(CONFIG_MICROCODE_AMD) += amd.o
obj-$(CONFIG_MICROCODE_EARLY) += core_early.o
obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o
obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o
This diff is collapsed.
This diff is collapsed.
...@@ -5,6 +5,12 @@ ...@@ -5,6 +5,12 @@
* 2006 Shaohua Li <shaohua.li@intel.com> * 2006 Shaohua Li <shaohua.li@intel.com>
* 2013-2015 Borislav Petkov <bp@alien8.de> * 2013-2015 Borislav Petkov <bp@alien8.de>
* *
* X86 CPU microcode early update for Linux:
*
* Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
* H Peter Anvin" <hpa@zytor.com>
* (C) 2015 Borislav Petkov <bp@alien8.de>
*
* This driver allows to upgrade microcode on x86 processors. * This driver allows to upgrade microcode on x86 processors.
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
...@@ -13,34 +19,39 @@ ...@@ -13,34 +19,39 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) "microcode: " fmt
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h> #include <linux/miscdevice.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/syscore_ops.h>
#include <asm/microcode.h> #include <asm/microcode_intel.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h> #include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h> #include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
MODULE_DESCRIPTION("Microcode Update Driver"); #define MICROCODE_VERSION "2.01"
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
MODULE_LICENSE("GPL");
#define MICROCODE_VERSION "2.00"
static struct microcode_ops *microcode_ops; static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr; static bool dis_ucode_ldr;
module_param(dis_ucode_ldr, bool, 0);
static int __init disable_loader(char *str)
{
dis_ucode_ldr = true;
return 1;
}
__setup("dis_ucode_ldr", disable_loader);
/* /*
* Synchronization. * Synchronization.
...@@ -68,6 +79,150 @@ struct cpu_info_ctx { ...@@ -68,6 +79,150 @@ struct cpu_info_ctx {
int err; int err;
}; };
/*
 * Check whether the "dis_ucode_ldr" kernel parameter was passed, and latch
 * the result into dis_ucode_ldr.  Returns true when the microcode loader
 * should be disabled.
 *
 * Runs on the BSP very early: on 32-bit this is before paging is enabled,
 * so the command line, the option string literal and the flag variable must
 * all be accessed through their physical addresses via __pa_nodebug().
 */
static bool __init check_loader_disabled_bsp(void)
{
#ifdef CONFIG_X86_32
	/* Pre-paging: translate every referenced object to its physical address. */
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *opt = "dis_ucode_ldr";
	const char *option = (const char *)__pa_nodebug(opt);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
#else /* CONFIG_X86_64 */
	/* 64-bit already runs with paging enabled; virtual addresses are fine. */
	const char *cmdline = boot_command_line;
	const char *option = "dis_ucode_ldr";
	bool *res = &dis_ucode_ldr;
#endif

	if (cmdline_find_option_bool(cmdline, option))
		*res = true;

	return *res;
}
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
/*
 * Look up a firmware blob linked into the kernel image (the builtin_fw
 * table bounded by __start_builtin_fw/__end_builtin_fw).  On a name match,
 * point @cd at the blob's data and size and return true.  Returns false
 * when FW_LOADER is off or no blob of that name exists.
 */
bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
	struct builtin_fw *fw;

	for (fw = __start_builtin_fw; fw != __end_builtin_fw; fw++) {
		if (strcmp(fw->name, name))
			continue;

		cd->data = fw->data;
		cd->size = fw->size;
		return true;
	}
#endif
	return false;
}
/*
 * Load microcode on the boot CPU as early as possible.  Bails out when the
 * loader was disabled on the command line or CPUID is unavailable; otherwise
 * dispatches to the vendor-specific early loader for sufficiently recent
 * CPU families.
 */
void __init load_ucode_bsp(void)
{
	unsigned int fam;
	int vend;

	if (check_loader_disabled_bsp() || !have_cpuid_p())
		return;

	vend = x86_vendor();
	fam  = x86_family();

	if (vend == X86_VENDOR_INTEL) {
		/* Early loading is done from family 6 (P6) onwards. */
		if (fam >= 6)
			load_ucode_intel_bsp();
	} else if (vend == X86_VENDOR_AMD) {
		/* AMD early loading is done from family 0x10 onwards. */
		if (fam >= 0x10)
			load_ucode_amd_bsp(fam);
	}
}
/*
 * AP-side check of the "disable loader" flag latched on the BSP.  On
 * 32-bit, early APs run before paging is enabled, so the flag must be
 * read through its physical address.
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
/*
 * Load microcode on an application processor during early bringup.
 * Mirrors load_ucode_bsp(): skip if the loader is disabled or CPUID is
 * unavailable, then call the vendor-specific AP loader for supported
 * CPU families.
 */
void load_ucode_ap(void)
{
	int vend, fam;

	if (check_loader_disabled_ap())
		return;

	if (!have_cpuid_p())
		return;

	vend = x86_vendor();
	fam  = x86_family();

	if (vend == X86_VENDOR_INTEL && fam >= 6)
		load_ucode_intel_ap();
	else if (vend == X86_VENDOR_AMD && fam >= 0x10)
		load_ucode_amd_ap();
}
/*
 * Stash microcode found in the initrd so it remains usable after the
 * initrd memory is freed.  Vendor helpers' return values are deliberately
 * ignored; this function always reports success.
 */
int __init save_microcode_in_initrd(void)
{
	const struct cpuinfo_x86 *bc = &boot_cpu_data;

	if (bc->x86_vendor == X86_VENDOR_INTEL) {
		if (bc->x86 >= 6)
			save_microcode_in_initrd_intel();
	} else if (bc->x86_vendor == X86_VENDOR_AMD) {
		if (bc->x86 >= 0x10)
			save_microcode_in_initrd_amd();
	}

	return 0;
}
/*
 * Re-apply the early-loaded microcode (e.g. after the CPU lost it), using
 * the same vendor/family gating as the initial early load.
 */
void reload_early_microcode(void)
{
	int vend = x86_vendor();
	int fam  = x86_family();

	if (vend == X86_VENDOR_INTEL && fam >= 6)
		reload_ucode_intel();
	else if (vend == X86_VENDOR_AMD && fam >= 0x10)
		reload_ucode_amd();
}
static void collect_cpu_info_local(void *arg) static void collect_cpu_info_local(void *arg)
{ {
struct cpu_info_ctx *ctx = arg; struct cpu_info_ctx *ctx = arg;
...@@ -210,9 +365,6 @@ static void __exit microcode_dev_exit(void) ...@@ -210,9 +365,6 @@ static void __exit microcode_dev_exit(void)
{ {
misc_deregister(&microcode_dev); misc_deregister(&microcode_dev);
} }
MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
MODULE_ALIAS("devname:cpu/microcode");
#else #else
#define microcode_dev_init() 0 #define microcode_dev_init() 0
#define microcode_dev_exit() do { } while (0) #define microcode_dev_exit() do { } while (0)
...@@ -463,20 +615,6 @@ static struct notifier_block mc_cpu_notifier = { ...@@ -463,20 +615,6 @@ static struct notifier_block mc_cpu_notifier = {
.notifier_call = mc_cpu_callback, .notifier_call = mc_cpu_callback,
}; };
#ifdef MODULE
/* Autoload on Intel and AMD systems */
static const struct x86_cpu_id __initconst microcode_id[] = {
#ifdef CONFIG_MICROCODE_INTEL
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, },
#endif
#ifdef CONFIG_MICROCODE_AMD
{ X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, },
#endif
{}
};
MODULE_DEVICE_TABLE(x86cpu, microcode_id);
#endif
static struct attribute *cpu_root_microcode_attrs[] = { static struct attribute *cpu_root_microcode_attrs[] = {
&dev_attr_reload.attr, &dev_attr_reload.attr,
NULL NULL
...@@ -487,9 +625,9 @@ static struct attribute_group cpu_root_microcode_group = { ...@@ -487,9 +625,9 @@ static struct attribute_group cpu_root_microcode_group = {
.attrs = cpu_root_microcode_attrs, .attrs = cpu_root_microcode_attrs,
}; };
static int __init microcode_init(void) int __init microcode_init(void)
{ {
struct cpuinfo_x86 *c = &cpu_data(0); struct cpuinfo_x86 *c = &boot_cpu_data;
int error; int error;
if (paravirt_enabled() || dis_ucode_ldr) if (paravirt_enabled() || dis_ucode_ldr)
...@@ -560,35 +698,3 @@ static int __init microcode_init(void) ...@@ -560,35 +698,3 @@ static int __init microcode_init(void)
return error; return error;
} }
module_init(microcode_init);
/*
 * Module-unload teardown for the microcode driver: unwinds everything
 * microcode_init() set up, in reverse registration order (device node,
 * hotplug notifier, syscore ops, sysfs group, subsys interface, platform
 * device), then lets the AMD backend clean up if applicable.
 * NOTE(review): teardown order here deliberately mirrors init order --
 * do not reorder.
 */
static void __exit microcode_exit(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	microcode_dev_exit();

	unregister_hotcpu_notifier(&mc_cpu_notifier);
	unregister_syscore_ops(&mc_syscore_ops);

	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

	/* Hold CPUs steady while tearing down the per-CPU interface. */
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	platform_device_unregister(microcode_pdev);

	microcode_ops = NULL;

	if (c->x86_vendor == X86_VENDOR_AMD)
		exit_amd_microcode();

	pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
}
module_exit(microcode_exit);
/*
* X86 CPU microcode early update for Linux
*
* Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
* H Peter Anvin" <hpa@zytor.com>
* (C) 2015 Borislav Petkov <bp@alien8.de>
*
* This driver allows to early upgrade microcode on Intel processors
* belonging to IA-32 family - PentiumPro, Pentium II,
* Pentium III, Xeon, Pentium 4, etc.
*
* Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture
* Software Developer's Manual.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/microcode_amd.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
static bool __init check_loader_disabled_bsp(void)
{
#ifdef CONFIG_X86_32
const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
const char *opt = "dis_ucode_ldr";
const char *option = (const char *)__pa_nodebug(opt);
bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
#else /* CONFIG_X86_64 */
const char *cmdline = boot_command_line;
const char *option = "dis_ucode_ldr";
bool *res = &dis_ucode_ldr;
#endif
if (cmdline_find_option_bool(cmdline, option))
*res = true;
return *res;
}
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
struct builtin_fw *b_fw;
for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
if (!strcmp(name, b_fw->name)) {
cd->size = b_fw->size;
cd->data = b_fw->data;
return true;
}
}
#endif
return false;
}
void __init load_ucode_bsp(void)
{
int vendor;
unsigned int family;
if (check_loader_disabled_bsp())
return;
if (!have_cpuid_p())
return;
vendor = x86_vendor();
family = x86_family();
switch (vendor) {
case X86_VENDOR_INTEL:
if (family >= 6)
load_ucode_intel_bsp();
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
load_ucode_amd_bsp(family);
break;
default:
break;
}
}
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
return dis_ucode_ldr;
#endif
}
void load_ucode_ap(void)
{
int vendor, family;
if (check_loader_disabled_ap())
return;
if (!have_cpuid_p())
return;
vendor = x86_vendor();
family = x86_family();
switch (vendor) {
case X86_VENDOR_INTEL:
if (family >= 6)
load_ucode_intel_ap();
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
load_ucode_amd_ap();
break;
default:
break;
}
}
int __init save_microcode_in_initrd(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
if (c->x86 >= 6)
save_microcode_in_initrd_intel();
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
save_microcode_in_initrd_amd();
break;
default:
break;
}
return 0;
}
void reload_early_microcode(void)
{
int vendor, family;
vendor = x86_vendor();
family = x86_family();
switch (vendor) {
case X86_VENDOR_INTEL:
if (family >= 6)
reload_ucode_intel();
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
reload_ucode_amd();
break;
default:
break;
}
}
This diff is collapsed.
This diff is collapsed.
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <asm/microcode_intel.h> #include <asm/microcode_intel.h>
#include <asm/processor.h> #include <asm/processor.h>
......
...@@ -152,7 +152,7 @@ ENTRY(startup_32) ...@@ -152,7 +152,7 @@ ENTRY(startup_32)
movl %eax, pa(olpc_ofw_pgd) movl %eax, pa(olpc_ofw_pgd)
#endif #endif
#ifdef CONFIG_MICROCODE_EARLY #ifdef CONFIG_MICROCODE
/* Early load ucode on BSP. */ /* Early load ucode on BSP. */
call load_ucode_bsp call load_ucode_bsp
#endif #endif
...@@ -311,12 +311,11 @@ ENTRY(startup_32_smp) ...@@ -311,12 +311,11 @@ ENTRY(startup_32_smp)
movl %eax,%ss movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp leal -__PAGE_OFFSET(%ecx),%esp
#ifdef CONFIG_MICROCODE_EARLY #ifdef CONFIG_MICROCODE
/* Early load ucode on AP. */ /* Early load ucode on AP. */
call load_ucode_ap call load_ucode_ap
#endif #endif
default_entry: default_entry:
#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
......
...@@ -111,6 +111,7 @@ ...@@ -111,6 +111,7 @@
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/microcode.h>
/* /*
* max_low_pfn_mapped: highest direct mapped pfn under 4GB * max_low_pfn_mapped: highest direct mapped pfn under 4GB
...@@ -480,34 +481,34 @@ static void __init memblock_x86_reserve_range_setup_data(void) ...@@ -480,34 +481,34 @@ static void __init memblock_x86_reserve_range_setup_data(void)
#ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_KEXEC_CORE
/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN (16 << 20)
/* /*
* Keep the crash kernel below this limit. On 32 bits earlier kernels * Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions. * would limit the kernel to the low 512 MiB due to mapping restrictions.
* On 64bit, old kexec-tools need to under 896MiB. * On 64bit, old kexec-tools need to under 896MiB.
*/ */
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) # define CRASH_ADDR_LOW_MAX (512 << 20)
# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) # define CRASH_ADDR_HIGH_MAX (512 << 20)
#else #else
# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) # define CRASH_ADDR_LOW_MAX (896UL << 20)
# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM # define CRASH_ADDR_HIGH_MAX MAXMEM
#endif #endif
static void __init reserve_crashkernel_low(void) static int __init reserve_crashkernel_low(void)
{ {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long base, low_base = 0, low_size = 0;
unsigned long long low_base = 0, low_size = 0;
unsigned long total_low_mem; unsigned long total_low_mem;
unsigned long long base;
bool auto_set = false;
int ret; int ret;
total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
/* crashkernel=Y,low */ /* crashkernel=Y,low */
ret = parse_crashkernel_low(boot_command_line, total_low_mem, ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
&low_size, &base); if (ret) {
if (ret != 0) {
/* /*
* two parts from lib/swiotlb.c: * two parts from lib/swiotlb.c:
* -swiotlb size: user-specified with swiotlb= or default. * -swiotlb size: user-specified with swiotlb= or default.
...@@ -517,52 +518,52 @@ static void __init reserve_crashkernel_low(void) ...@@ -517,52 +518,52 @@ static void __init reserve_crashkernel_low(void)
* make sure we allocate enough extra low memory so that we * make sure we allocate enough extra low memory so that we
* don't run out of DMA buffers for 32-bit devices. * don't run out of DMA buffers for 32-bit devices.
*/ */
low_size = max(swiotlb_size_or_default() + (8UL<<20), 256UL<<20); low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
auto_set = true;
} else { } else {
/* passed with crashkernel=0,low ? */ /* passed with crashkernel=0,low ? */
if (!low_size) if (!low_size)
return; return 0;
} }
low_base = memblock_find_in_range(low_size, (1ULL<<32), low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
low_size, alignment);
if (!low_base) { if (!low_base) {
if (!auto_set) pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
pr_info("crashkernel low reservation failed - No suitable area found.\n"); (unsigned long)(low_size >> 20));
return -ENOMEM;
}
return; ret = memblock_reserve(low_base, low_size);
if (ret) {
pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
return ret;
} }
memblock_reserve(low_base, low_size);
pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
(unsigned long)(low_size >> 20), (unsigned long)(low_size >> 20),
(unsigned long)(low_base >> 20), (unsigned long)(low_base >> 20),
(unsigned long)(total_low_mem >> 20)); (unsigned long)(total_low_mem >> 20));
crashk_low_res.start = low_base; crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1; crashk_low_res.end = low_base + low_size - 1;
insert_resource(&iomem_resource, &crashk_low_res); insert_resource(&iomem_resource, &crashk_low_res);
#endif #endif
return 0;
} }
static void __init reserve_crashkernel(void) static void __init reserve_crashkernel(void)
{ {
const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long crash_size, crash_base, total_mem;
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
bool high = false; bool high = false;
int ret; int ret;
total_mem = memblock_phys_mem_size(); total_mem = memblock_phys_mem_size();
/* crashkernel=XM */ /* crashkernel=XM */
ret = parse_crashkernel(boot_command_line, total_mem, ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
&crash_size, &crash_base);
if (ret != 0 || crash_size <= 0) { if (ret != 0 || crash_size <= 0) {
/* crashkernel=X,high */ /* crashkernel=X,high */
ret = parse_crashkernel_high(boot_command_line, total_mem, ret = parse_crashkernel_high(boot_command_line, total_mem,
&crash_size, &crash_base); &crash_size, &crash_base);
if (ret != 0 || crash_size <= 0) if (ret != 0 || crash_size <= 0)
return; return;
high = true; high = true;
...@@ -573,11 +574,10 @@ static void __init reserve_crashkernel(void) ...@@ -573,11 +574,10 @@ static void __init reserve_crashkernel(void)
/* /*
* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
*/ */
crash_base = memblock_find_in_range(alignment, crash_base = memblock_find_in_range(CRASH_ALIGN,
high ? CRASH_KERNEL_ADDR_HIGH_MAX : high ? CRASH_ADDR_HIGH_MAX
CRASH_KERNEL_ADDR_LOW_MAX, : CRASH_ADDR_LOW_MAX,
crash_size, alignment); crash_size, CRASH_ALIGN);
if (!crash_base) { if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n"); pr_info("crashkernel reservation failed - No suitable area found.\n");
return; return;
...@@ -587,26 +587,32 @@ static void __init reserve_crashkernel(void) ...@@ -587,26 +587,32 @@ static void __init reserve_crashkernel(void)
unsigned long long start; unsigned long long start;
start = memblock_find_in_range(crash_base, start = memblock_find_in_range(crash_base,
crash_base + crash_size, crash_size, 1<<20); crash_base + crash_size,
crash_size, 1 << 20);
if (start != crash_base) { if (start != crash_base) {
pr_info("crashkernel reservation failed - memory is in use.\n"); pr_info("crashkernel reservation failed - memory is in use.\n");
return; return;
} }
} }
memblock_reserve(crash_base, crash_size); ret = memblock_reserve(crash_base, crash_size);
if (ret) {
pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
return;
}
if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
memblock_free(crash_base, crash_size);
return;
}
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
"for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20),
(unsigned long)(crash_size >> 20), (unsigned long)(crash_base >> 20),
(unsigned long)(crash_base >> 20), (unsigned long)(total_mem >> 20));
(unsigned long)(total_mem >> 20));
crashk_res.start = crash_base; crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1; crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res); insert_resource(&iomem_resource, &crashk_res);
if (crash_base >= (1ULL<<32))
reserve_crashkernel_low();
} }
#else #else
static void __init reserve_crashkernel(void) static void __init reserve_crashkernel(void)
...@@ -1244,6 +1250,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -1244,6 +1250,8 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT)) if (efi_enabled(EFI_BOOT))
efi_apply_memmap_quirks(); efi_apply_memmap_quirks();
#endif #endif
microcode_init();
} }
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
......
...@@ -693,14 +693,12 @@ void free_initmem(void) ...@@ -693,14 +693,12 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end) void __init free_initrd_mem(unsigned long start, unsigned long end)
{ {
#ifdef CONFIG_MICROCODE_EARLY
/* /*
* Remember, initrd memory may contain microcode or other useful things. * Remember, initrd memory may contain microcode or other useful things.
* Before we lose initrd mem, we need to find a place to hold them * Before we lose initrd mem, we need to find a place to hold them
* now that normal virtual memory is enabled. * now that normal virtual memory is enabled.
*/ */
save_microcode_in_initrd(); save_microcode_in_initrd();
#endif
/* /*
* end could be not aligned, and We can not align that, * end could be not aligned, and We can not align that,
......
config AMD_MCE_INJ config AMD_MCE_INJ
tristate "Simple MCE injection interface for AMD processors" tristate "Simple MCE injection interface for AMD processors"
depends on RAS && EDAC_DECODE_MCE && DEBUG_FS depends on RAS && EDAC_DECODE_MCE && DEBUG_FS && AMD_NB
default n default n
help help
This is a simple debugfs interface to inject MCEs and test different This is a simple debugfs interface to inject MCEs and test different
aspects of the MCE handling code. aspects of the MCE handling code.
WARNING: Do not even assume this interface is staying stable! WARNING: Do not even assume this interface is staying stable!
...@@ -17,7 +17,11 @@ ...@@ -17,7 +17,11 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/pci.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/amd_nb.h>
#include <asm/irq_vectors.h>
#include "../kernel/cpu/mcheck/mce-internal.h" #include "../kernel/cpu/mcheck/mce-internal.h"
...@@ -30,16 +34,21 @@ static struct dentry *dfs_inj; ...@@ -30,16 +34,21 @@ static struct dentry *dfs_inj;
static u8 n_banks; static u8 n_banks;
#define MAX_FLAG_OPT_SIZE 3 #define MAX_FLAG_OPT_SIZE 3
#define NBCFG 0x44
enum injection_type { enum injection_type {
SW_INJ = 0, /* SW injection, simply decode the error */ SW_INJ = 0, /* SW injection, simply decode the error */
HW_INJ, /* Trigger a #MC */ HW_INJ, /* Trigger a #MC */
DFR_INT_INJ, /* Trigger Deferred error interrupt */
THR_INT_INJ, /* Trigger threshold interrupt */
N_INJ_TYPES, N_INJ_TYPES,
}; };
static const char * const flags_options[] = { static const char * const flags_options[] = {
[SW_INJ] = "sw", [SW_INJ] = "sw",
[HW_INJ] = "hw", [HW_INJ] = "hw",
[DFR_INT_INJ] = "df",
[THR_INT_INJ] = "th",
NULL NULL
}; };
...@@ -129,12 +138,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, ...@@ -129,12 +138,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf,
{ {
char buf[MAX_FLAG_OPT_SIZE], *__buf; char buf[MAX_FLAG_OPT_SIZE], *__buf;
int err; int err;
size_t ret;
if (cnt > MAX_FLAG_OPT_SIZE) if (cnt > MAX_FLAG_OPT_SIZE)
cnt = MAX_FLAG_OPT_SIZE; return -EINVAL;
ret = cnt;
if (copy_from_user(&buf, ubuf, cnt)) if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT; return -EFAULT;
...@@ -150,9 +156,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, ...@@ -150,9 +156,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf,
return err; return err;
} }
*ppos += ret; *ppos += cnt;
return ret; return cnt;
} }
static const struct file_operations flags_fops = { static const struct file_operations flags_fops = {
...@@ -185,6 +191,55 @@ static void trigger_mce(void *info) ...@@ -185,6 +191,55 @@ static void trigger_mce(void *info)
asm volatile("int $18"); asm volatile("int $18");
} }
static void trigger_dfr_int(void *info)
{
asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
}
static void trigger_thr_int(void *info)
{
asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
}
static u32 get_nbc_for_node(int node_id)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
u32 cores_per_node;
cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket();
return cores_per_node * node_id;
}
static void toggle_nb_mca_mst_cpu(u16 nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
u32 val;
int err;
if (!F3)
return;
err = pci_read_config_dword(F3, NBCFG, &val);
if (err) {
pr_err("%s: Error reading F%dx%03x.\n",
__func__, PCI_FUNC(F3->devfn), NBCFG);
return;
}
if (val & BIT(27))
return;
pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
__func__);
val |= BIT(27);
err = pci_write_config_dword(F3, NBCFG, val);
if (err)
pr_err("%s: Error writing F%dx%03x.\n",
__func__, PCI_FUNC(F3->devfn), NBCFG);
}
static void do_inject(void) static void do_inject(void)
{ {
u64 mcg_status = 0; u64 mcg_status = 0;
...@@ -205,6 +260,26 @@ static void do_inject(void) ...@@ -205,6 +260,26 @@ static void do_inject(void)
if (!(i_mce.status & MCI_STATUS_PCC)) if (!(i_mce.status & MCI_STATUS_PCC))
mcg_status |= MCG_STATUS_RIPV; mcg_status |= MCG_STATUS_RIPV;
/*
* Ensure necessary status bits for deferred errors:
* - MCx_STATUS[Deferred]: make sure it is a deferred error
* - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
*/
if (inj_type == DFR_INT_INJ) {
i_mce.status |= MCI_STATUS_DEFERRED;
i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
}
/*
* For multi node CPUs, logging and reporting of bank 4 errors happens
* only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
* Fam10h and later BKDGs.
*/
if (static_cpu_has(X86_FEATURE_AMD_DCM) && b == 4) {
toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
cpu = get_nbc_for_node(amd_get_nb_id(cpu));
}
get_online_cpus(); get_online_cpus();
if (!cpu_online(cpu)) if (!cpu_online(cpu))
goto err; goto err;
...@@ -225,7 +300,16 @@ static void do_inject(void) ...@@ -225,7 +300,16 @@ static void do_inject(void)
toggle_hw_mce_inject(cpu, false); toggle_hw_mce_inject(cpu, false);
smp_call_function_single(cpu, trigger_mce, NULL, 0); switch (inj_type) {
case DFR_INT_INJ:
smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
break;
case THR_INT_INJ:
smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
break;
default:
smp_call_function_single(cpu, trigger_mce, NULL, 0);
}
err: err:
put_online_cpus(); put_online_cpus();
...@@ -290,6 +374,11 @@ static const char readme_msg[] = ...@@ -290,6 +374,11 @@ static const char readme_msg[] =
"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" "\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
"\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" "\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
"\t before injecting.\n" "\t before injecting.\n"
"\t - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
"\t error APIC interrupt handler to handle the error if the feature is \n"
"\t is present in hardware. \n"
"\t - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
"\t APIC interrupt handler to handle the error. \n"
"\n"; "\n";
static ssize_t static ssize_t
......
...@@ -2785,7 +2785,7 @@ static int init_one_instance(struct pci_dev *F2) ...@@ -2785,7 +2785,7 @@ static int init_one_instance(struct pci_dev *F2)
struct mem_ctl_info *mci = NULL; struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2]; struct edac_mc_layer layers[2];
int err = 0, ret; int err = 0, ret;
u16 nid = amd_get_node_id(F2); u16 nid = amd_pci_dev_to_node_id(F2);
ret = -ENOMEM; ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
...@@ -2875,7 +2875,7 @@ static int init_one_instance(struct pci_dev *F2) ...@@ -2875,7 +2875,7 @@ static int init_one_instance(struct pci_dev *F2)
static int probe_one_instance(struct pci_dev *pdev, static int probe_one_instance(struct pci_dev *pdev,
const struct pci_device_id *mc_type) const struct pci_device_id *mc_type)
{ {
u16 nid = amd_get_node_id(pdev); u16 nid = amd_pci_dev_to_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc; struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s; struct ecc_settings *s;
int ret = 0; int ret = 0;
...@@ -2925,7 +2925,7 @@ static void remove_one_instance(struct pci_dev *pdev) ...@@ -2925,7 +2925,7 @@ static void remove_one_instance(struct pci_dev *pdev)
{ {
struct mem_ctl_info *mci; struct mem_ctl_info *mci;
struct amd64_pvt *pvt; struct amd64_pvt *pvt;
u16 nid = amd_get_node_id(pdev); u16 nid = amd_pci_dev_to_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc; struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid]; struct ecc_settings *s = ecc_stngs[nid];
......
...@@ -1149,7 +1149,7 @@ static int __init parse_crashkernel_simple(char *cmdline, ...@@ -1149,7 +1149,7 @@ static int __init parse_crashkernel_simple(char *cmdline,
if (*cur == '@') if (*cur == '@')
*crash_base = memparse(cur+1, &cur); *crash_base = memparse(cur+1, &cur);
else if (*cur != ' ' && *cur != '\0') { else if (*cur != ' ' && *cur != '\0') {
pr_warn("crashkernel: unrecognized char\n"); pr_warn("crashkernel: unrecognized char: %c\n", *cur);
return -EINVAL; return -EINVAL;
} }
...@@ -1186,12 +1186,12 @@ static int __init parse_crashkernel_suffix(char *cmdline, ...@@ -1186,12 +1186,12 @@ static int __init parse_crashkernel_suffix(char *cmdline,
/* check with suffix */ /* check with suffix */
if (strncmp(cur, suffix, strlen(suffix))) { if (strncmp(cur, suffix, strlen(suffix))) {
pr_warn("crashkernel: unrecognized char\n"); pr_warn("crashkernel: unrecognized char: %c\n", *cur);
return -EINVAL; return -EINVAL;
} }
cur += strlen(suffix); cur += strlen(suffix);
if (*cur != ' ' && *cur != '\0') { if (*cur != ' ' && *cur != '\0') {
pr_warn("crashkernel: unrecognized char\n"); pr_warn("crashkernel: unrecognized char: %c\n", *cur);
return -EINVAL; return -EINVAL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment