Commit 999b295a authored by Thomas Gleixner, committed by Greg Kroah-Hartman

x86/speculation: Rename SSBD update functions

commit 26c4d75b upstream

During context switch, the SSBD bit in SPEC_CTRL MSR is updated according
to changes of the TIF_SSBD flag between the current and the next running task.
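
For illustration only, a minimal user-space sketch of that pattern follows
(this is not the kernel code: fake_wrmsrl(), switch_ssbd() and the bit
values are stand-ins assumed for the demo; the real work is done in
__switch_to_xtra() with wrmsrl()):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in values; the real definitions live in the x86
 * kernel headers and may differ. */
#define TIF_SSBD_BIT    5                     /* assumed TIF bit position */
#define _TIF_SSBD       (1UL << TIF_SSBD_BIT)
#define SPEC_CTRL_SSBD  (1ULL << 2)           /* SSBD is bit 2 of SPEC_CTRL */

static uint64_t spec_ctrl_base;               /* models x86_spec_ctrl_base */

/* Fake MSR write; the kernel does wrmsrl(MSR_IA32_SPEC_CTRL, msr). */
static void fake_wrmsrl(uint64_t msr)
{
	printf("SPEC_CTRL <- 0x%llx\n", (unsigned long long)msr);
}

/* Models the SSBD handling at context switch: the MSR is touched only
 * when the flag differs between the previous and the next task. */
static void switch_ssbd(unsigned long tifp, unsigned long tifn)
{
	if ((tifp ^ tifn) & _TIF_SSBD) {
		uint64_t msr = spec_ctrl_base;

		if (tifn & _TIF_SSBD)
			msr |= SPEC_CTRL_SSBD;
		fake_wrmsrl(msr);
	}
}

int main(void)
{
	switch_ssbd(0, _TIF_SSBD);   /* next task sets SSBD: MSR written   */
	switch_ssbd(_TIF_SSBD, 0);   /* next task clears it: MSR written   */
	switch_ssbd(0, 0);           /* no change:           MSR untouched */
	return 0;
}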

Currently, only the bit controlling speculative store bypass disable in
SPEC_CTRL MSR is updated and the related update functions all have
"speculative_store" or "ssb" in their names.

For enhanced mitigation control, other bits in SPEC_CTRL MSR need to be
updated as well, which makes the SSB names inadequate.
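
As a rough sketch of where that leads (illustrative only: _TIF_SPEC_IB is a
hypothetical thread flag used purely for the example, and the TIF bit values
are assumed), a generic conversion could fold several per-task mitigation
bits into the MSR value:

#include <stdint.h>

/* Illustrative values only; the real definitions are in the x86 headers. */
#define _TIF_SSBD        (1UL << 5)   /* assumed TIF bit for SSBD          */
#define _TIF_SPEC_IB     (1UL << 9)   /* hypothetical TIF bit for STIBP    */
#define SPEC_CTRL_STIBP  (1ULL << 1)  /* STIBP is bit 1 of SPEC_CTRL       */
#define SPEC_CTRL_SSBD   (1ULL << 2)  /* SSBD  is bit 2 of SPEC_CTRL       */

/* Sketch of a generic TIF -> SPEC_CTRL conversion: once more than one
 * mitigation is controlled per task, one helper can derive all of the
 * affected MSR bits from the thread flags instead of handling SSBD alone. */
uint64_t tif_to_spec_ctrl(unsigned long tifn)
{
	uint64_t msr = 0;

	if (tifn & _TIF_SSBD)
		msr |= SPEC_CTRL_SSBD;
	if (tifn & _TIF_SPEC_IB)
		msr |= SPEC_CTRL_STIBP;
	return msr;
}

This patch itself only renames the existing SSBD paths; handling of
additional bits is left to follow-up changes.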

Rename the "speculative_store*" functions to a more generic name. No
functional change.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Casey Schaufler <casey.schaufler@intel.com>
Cc: Asit Mallick <asit.k.mallick@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jon Masters <jcm@redhat.com>
Cc: Waiman Long <longman9394@gmail.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Dave Stewart <david.c.stewart@intel.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181125185004.058866968@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent aca2ddbc
arch/x86/include/asm/spec-ctrl.h
@@ -70,11 +70,11 @@ extern void speculative_store_bypass_ht_init(void);
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif
 
-extern void speculative_store_bypass_update(unsigned long tif);
+extern void speculation_ctrl_update(unsigned long tif);
 
-static inline void speculative_store_bypass_update_current(void)
+static inline void speculation_ctrl_update_current(void)
 {
-	speculative_store_bypass_update(current_thread_info()->flags);
+	speculation_ctrl_update(current_thread_info()->flags);
 }
 
 #endif

arch/x86/kernel/cpu/bugs.c
@@ -199,7 +199,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
 				 ssbd_spec_ctrl_to_tif(hostval);
 
-		speculative_store_bypass_update(tif);
+		speculation_ctrl_update(tif);
 	}
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -629,7 +629,7 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 	 * mitigation until it is next scheduled.
 	 */
 	if (task == current && update)
-		speculative_store_bypass_update_current();
+		speculation_ctrl_update_current();
 
 	return 0;
 }

arch/x86/kernel/process.c
@@ -398,27 +398,27 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
 	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
 
 	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static __always_inline void __speculation_ctrl_update(unsigned long tifn)
 {
 	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
 		amd_set_ssb_virt_state(tifn);
 	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		amd_set_core_ssb_state(tifn);
 	else
-		intel_set_ssb_state(tifn);
+		spec_ctrl_update_msr(tifn);
 }
 
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
 {
 	preempt_disable();
-	__speculative_store_bypass_update(tif);
+	__speculation_ctrl_update(tif);
 	preempt_enable();
 }
 
@@ -455,7 +455,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
 	if ((tifp ^ tifn) & _TIF_SSBD)
-		__speculative_store_bypass_update(tifn);
+		__speculation_ctrl_update(tifn);
 }
 
 /*