Commit e3420f98 authored by Linus Torvalds

Merge tag 'x86_cpu_for_v6.4_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu model updates from Borislav Petkov:

 - Add Emerald Rapids to the list of Intel models supporting PPIN

 - Finally use a CPUID bit for split lock detection instead of
   enumerating every model

 - Make sure automatic IBRS is set on AMD, even though the AP bringup
   code does that now by replicating the MSR which contains the switch

* tag 'x86_cpu_for_v6.4_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Add Xeon Emerald Rapids to list of CPUs that support PPIN
  x86/split_lock: Enumerate architectural split lock disable bit
  x86/CPU/AMD: Make sure EFER[AIBRSE] is set
parents 1699dbeb 36168bc0
@@ -1009,6 +1009,17 @@ static void init_amd(struct cpuinfo_x86 *c)
 		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
 
 	check_null_seg_clears_base(c);
+
+	/*
+	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
+	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
+	 * order to be replicated onto them. Regardless, set it here again, if not set,
+	 * to protect against any future refactoring/code reorganization which might
+	 * miss setting this important bit.
+	 */
+	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+	    cpu_has(c, X86_FEATURE_AUTOIBRS))
+		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
 }
 
 #ifdef CONFIG_X86_32
...
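The effect of this hunk can be checked on a running machine by reading EFER from user space. The sketch below is illustrative and not part of the patch: it assumes the msr driver is loaded (modprobe msr) and root privileges, and uses the constants from arch/x86/include/asm/msr-index.h, where MSR_EFER is 0xc0000080 and _EFER_AUTOIBRS is bit 21.

/*
 * Illustrative user-space check (not from the patch): read EFER on CPU 0
 * through the msr driver and test the Automatic IBRS enable bit.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t efer;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr driver uses the file offset as the MSR address. */
	if (pread(fd, &efer, sizeof(efer), 0xc0000080) != sizeof(efer)) {
		perror("pread MSR_EFER");
		close(fd);
		return 1;
	}
	close(fd);
	printf("EFER[AIBRSE] = %lu\n", (unsigned long)((efer >> 21) & 1));
	return 0;
}

On a kernel with this fix, spectre_v2 in an eIBRS mode, and a CPU advertising X86_FEATURE_AUTOIBRS, the bit should read as 1 once init_amd() has run.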
@@ -784,8 +784,7 @@ static int __init nospectre_v1_cmdline(char *str)
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-	SPECTRE_V2_NONE;
+enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
 
 #undef pr_fmt
 #define pr_fmt(fmt) "RETBleed: " fmt
@@ -1133,13 +1132,6 @@ spectre_v2_parse_user_cmdline(void)
 	return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
-{
-	return mode == SPECTRE_V2_EIBRS ||
-	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
-	       mode == SPECTRE_V2_EIBRS_LFENCE;
-}
-
 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
 {
 	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
...
@@ -121,6 +121,7 @@ static const struct x86_cpu_id ppin_cpuids[] = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
...
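The table above only tells the kernel which parts expose a Protected Processor Inventory Number; the value itself can be read back the same way from user space. A minimal sketch under the same assumptions as before (msr driver loaded, root), plus one more: an Intel server part from this list, where MSR_PPIN_CTL is 0x4e with the enable in bit 1 and MSR_PPIN is 0x4f per the kernel's msr-index.h.

/*
 * Illustrative PPIN dump (not from the patch). Reading MSR_PPIN faults
 * unless the enable bit in MSR_PPIN_CTL is set, which the error path
 * below catches.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int rdmsr_cpu0(uint32_t msr, uint64_t *val)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, val, sizeof(*val), msr) != sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	uint64_t ctl, ppin;

	if (rdmsr_cpu0(0x4e, &ctl) || !(ctl & 0x2)) {
		fprintf(stderr, "PPIN not enabled (or CTL unreadable)\n");
		return 1;
	}
	if (rdmsr_cpu0(0x4f, &ppin)) {
		perror("read MSR_PPIN");
		return 1;
	}
	printf("PPIN: %#llx\n", (unsigned long long)ppin);
	return 0;
}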
@@ -83,4 +83,12 @@ unsigned int aperfmperf_get_khz(int cpu);
 extern void x86_spec_ctrl_setup_ap(void);
 extern void update_srbds_msr(void);
 
+extern enum spectre_v2_mitigation spectre_v2_enabled;
+
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+	return mode == SPECTRE_V2_EIBRS ||
+	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	       mode == SPECTRE_V2_EIBRS_LFENCE;
+}
+
 #endif /* ARCH_X86_CPU_H */
@@ -1451,31 +1451,13 @@ void handle_bus_lock(struct pt_regs *regs)
 }
 
 /*
- * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
- * only be trusted if it is confirmed that a CPU model implements a
- * specific feature at a particular bit position.
- *
- * The possible driver data field values:
- *
- * - 0: CPU models that are known to have the per-core split-lock detection
- *	feature even though they do not enumerate IA32_CORE_CAPABILITIES.
- *
- * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
- *	bit 5 to enumerate the per-core split-lock detection feature.
+ * CPU models that are known to have the per-core split-lock detection
+ * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
  */
 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
-	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
-	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 1),
 	{}
 };
 
@@ -1487,24 +1469,27 @@ static void __init split_lock_setup(struct cpuinfo_x86 *c)
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return;
 
+	/* Check for CPUs that have support but do not enumerate it: */
 	m = x86_match_cpu(split_lock_cpu_ids);
-	if (!m)
-		return;
+	if (m)
+		goto supported;
 
-	switch (m->driver_data) {
-	case 0:
-		break;
-	case 1:
-		if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
-			return;
-		rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
-		if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
-			return;
-		break;
-	default:
-		return;
-	}
+	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
+		return;
+
+	/*
+	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
+	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set
+	 * it have split lock detection.
+	 */
+	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
+	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
+		goto supported;
 
+	/* CPU is not in the model list and does not have the MSR bit: */
+	return;
+
+supported:
 	cpu_model_supports_sld = true;
 	__split_lock_setup();
 }
...
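The rewritten split_lock_setup() above reduces to: trust the model table first, otherwise trust the architectural enumeration. A user-space sketch of that second path, assuming gcc/clang's <cpuid.h> and, again, the msr driver and root: CPUID.(EAX=7,ECX=0):EDX bit 30 enumerates IA32_CORE_CAPABILITIES (MSR 0xcf), whose bit 5 is the split lock detect feature.

/*
 * Illustrative user-space equivalent of the architectural path (not the
 * kernel code). A real kernel also keeps the model table above for parts
 * that predate the MSR bit.
 */
#include <cpuid.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t core_caps;
	int fd;

	/* CPUID.(EAX=7,ECX=0):EDX[30] == IA32_CORE_CAPABILITIES present */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) ||
	    !(edx & (1u << 30))) {
		puts("IA32_CORE_CAPABILITIES not enumerated");
		return 1;
	}

	fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0 ||
	    pread(fd, &core_caps, sizeof(core_caps), 0xcf) != sizeof(core_caps)) {
		perror("read MSR_IA32_CORE_CAPS");
		return 1;
	}
	close(fd);

	/* Bit 5 is MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT. */
	printf("split lock detect: %s\n",
	       (core_caps & (1u << 5)) ? "supported" : "not supported");
	return 0;
}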