Commit fa8ac498 authored by Thomas Gleixner

x86/bugs: Expose x86_spec_ctrl_base directly

x86_spec_ctrl_base is the system-wide default value for the SPEC_CTRL MSR.
x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
prevent modification of that variable. However, the variable is read-only
after init and already globally visible, so the accessor buys nothing.

Remove the function and export the variable instead.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent cc69b349
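
For context, the change at a call site boils down to the following before/after pair, taken from the firmware IBRS macro changed in the diff below (shown here outside the diff purely for readability):

	/* Before this patch: go through the accessor. */
	u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;

	/* After this patch: read the exported, __ro_after_init variable directly. */
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;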
arch/x86/include/asm/nospec-branch.h
@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
-/*
- * The Intel specification for the SPEC_CTRL MSR requires that we
- * preserve any already set reserved bits at boot time (e.g. for
- * future additions that this kernel is not currently aware of).
- * We then set any additional mitigation bits that we want
- * ourselves and always use this as the base for SPEC_CTRL.
- * We also use this when handling guest entry/exit as below.
- */
 extern void x86_spec_ctrl_set(u64);
-extern u64 x86_spec_ctrl_get_default(void);
 
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
@@ -278,6 +269,9 @@ static inline void indirect_branch_prediction_barrier(void)
 	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -286,7 +280,7 @@ static inline void indirect_branch_prediction_barrier(void)
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
-	u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;		\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
 									\
 	preempt_disable();						\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
@@ -295,7 +289,7 @@ do {									\
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
-	u64 val = x86_spec_ctrl_get_default();				\
+	u64 val = x86_spec_ctrl_base;					\
 									\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
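
The two macros above bracket calls into firmware so that IBRS is enabled across the call. A minimal usage sketch; example_call_firmware() and do_firmware_service() are hypothetical placeholders, only the two macros are the real kernel interface:

	static void example_call_firmware(void)
	{
		/* Writes x86_spec_ctrl_base | SPEC_CTRL_IBRS and disables preemption. */
		firmware_restrict_branch_speculation_start();
		do_firmware_service();	/* hypothetical firmware entry point */
		/* Restores the plain base value after the firmware call returns. */
		firmware_restrict_branch_speculation_end();
	}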
arch/x86/include/asm/spec-ctrl.h
@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_ssbd_mask;
 
-/* The Intel SPEC CTRL MSR base value cache */
-extern u64 x86_spec_ctrl_base;
-
 static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
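
For reference, the helper whose opening lines are visible in the context above translates the per-task TIF_SSBD flag into the SPEC_CTRL SSBD bit. A sketch of the full body as it exists around this series (shown only for illustration, not part of this diff):

	static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
	{
		/* The build-time check guarantees the shift below moves the bit down. */
		BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
		/* Move the task-flag bit into the SPEC_CTRL_SSBD bit position. */
		return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	}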
arch/x86/kernel/cpu/bugs.c
@@ -36,6 +36,7 @@ static void __init ssb_select_mitigation(void);
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
  */
 u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -141,16 +142,6 @@ void x86_spec_ctrl_set(u64 val)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
-u64 x86_spec_ctrl_get_default(void)
-{
-	u64 msrval = x86_spec_ctrl_base;
-
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
-		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-	return msrval;
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
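
With EXPORT_SYMBOL_GPL(x86_spec_ctrl_base) in place, GPL-licensed modules such as the virtualization code that uses the guest/host restore helpers declared above can read the cached base value directly instead of calling the removed accessor. A minimal sketch of such a consumer; example_restore_host_spec_ctrl() is a hypothetical name, not code from this patch:

	static void example_restore_host_spec_ctrl(void)
	{
		/* Write the system-wide default back if the CPU exposes SPEC_CTRL. */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}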