Commit 52817587 authored by Thomas Gleixner

x86/cpufeatures: Disentangle SSBD enumeration

The SSBD enumeration is, like the other speculation control bits, magically
shared between Intel and AMD, even though the underlying mechanisms differ.

Make X86_FEATURE_SSBD synthetic and set it depending on the vendor-specific
features or the family-dependent setup.

Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
controlled via MSR_SPEC_CTRL and fix up the usage sites.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 7eb8956a
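
The net effect: X86_FEATURE_SSBD becomes the one vendor-neutral bit that
mitigation code tests, while X86_FEATURE_SPEC_CTRL_SSBD (Intel, CPUID
enumerated) and X86_FEATURE_LS_CFG_SSBD (AMD, family-dependent LS_CFG MSR)
record how SSBD is actually implemented. A condensed sketch of the resulting
enumeration flow, distilled from the hunks below (not literal kernel code):

	/* Intel: CPUID says SSBD is controlled via MSR_SPEC_CTRL (common.c) */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	/* AMD: SSBD is implemented via a family-dependent LS_CFG MSR bit (amd.c) */
	if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
		setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
		setup_force_cpu_cap(X86_FEATURE_SSBD);
	}

	/* Consumers ask only the vendor-neutral question */
	if (boot_cpu_has(X86_FEATURE_SSBD))
		ssb_mitigation_possible = true;	/* illustrative variable, not in the patch */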
arch/x86/include/asm/cpufeatures.h
@@ -207,13 +207,14 @@
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD		( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_SSBD		( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_LS_CFG_SSBD		( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
@@ -339,7 +340,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_SSBD		(18*32+31) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
arch/x86/kernel/cpu/amd.c
@@ -570,8 +570,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		 * avoid RMW. If that faults, do not enable SSBD.
 		 */
 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 			setup_force_cpu_cap(X86_FEATURE_SSBD);
-			setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 		}
 	}
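
For orientation, the family-dependent bit selection referenced in the
changelog sits just above this hunk and is untouched by it; from the earlier
SSB series it looks roughly like this (shown only as context, not part of
this diff):

	/* The LS_CFG SSBD control bit differs per AMD family */
	switch (c->x86) {
	case 0x15: bit = 54; break;
	case 0x16: bit = 33; break;
	case 0x17: bit = 10; break;
	default: return;
	}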
@@ -919,11 +919,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
 	if (!cpu_has(c, X86_FEATURE_XENPV))
 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
-
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		set_cpu_cap(c, X86_FEATURE_SSBD);
-		set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
-	}
 }
 
 #ifdef CONFIG_X86_32
arch/x86/kernel/cpu/bugs.c
@@ -159,8 +159,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	/* Intel controls SSB in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+	/* SSBD controlled in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -176,8 +176,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	/* Intel controls SSB in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+	/* SSBD controlled in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -189,7 +189,7 @@ static void x86_amd_ssb_disable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
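
The ssbd_tif_to_spec_ctrl() helper used in the two hunks above maps the
per-task TIF_SSBD flag onto the SSBD bit (bit 2) of MSR_SPEC_CTRL. A sketch
of the idea, assuming the helper's shape in asm/spec-ctrl.h at the time:

	static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
	{
		/* Shift the TIF_SSBD bit down to the SPEC_CTRL_SSBD bit position */
		return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	}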
arch/x86/kernel/cpu/common.c
@@ -767,6 +767,9 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
arch/x86/kernel/cpu/intel.c
@@ -191,6 +191,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
+		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 	}
 
 	/*
arch/x86/kernel/process.c
@@ -283,7 +283,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
 {
 	u64 msr;
 
-	if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
 		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
 	} else {
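
For completeness: the else branch below this hunk is unchanged and therefore
not shown; it takes the MSR_SPEC_CTRL path, so this one site now cleanly
separates the AMD LS_CFG mechanism from the architectural MSR. Roughly:

	} else {
		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
	}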