Commit 07853adc authored by Josh Poimboeuf, committed by Borislav Petkov

KVM: VMX: Prevent RSB underflow before vmenter

On VMX, there are some balanced returns between the time the guest's
SPEC_CTRL value is written and the vmenter.

Balanced returns (matched by a preceding call) are usually fine, but it is
at least theoretically possible that an NMI with a deep call stack could
empty the RSB before one of those returns is executed.

For maximum paranoia, don't allow *any* returns (balanced or otherwise)
between the SPEC_CTRL write and the vmenter.

  [ bp: Fix 32-bit build. ]
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
parent 9756bba2
arch/x86/kernel/asm-offsets.c
@@ -19,6 +19,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 #include <asm/tdx.h>
+#include "../kvm/vmx/vmx.h"

 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -107,4 +108,9 @@ static void __used common(void)
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
+
+	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+		BLANK();
+		OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
+	}
 }
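The asm-offsets change above is what lets the assembly in vmenter.S reach vcpu_vmx::spec_ctrl without calling back into C: the build generates a VMX_spec_ctrl constant holding the field's byte offset. As a rough illustration of the mechanism, here is a minimal sketch modeled on the kernel's kbuild DEFINE()/OFFSET() helpers; the struct layout and the vcpu_vmx_example name are stand-ins for this example, not the real vcpu_vmx definition.

/*
 * Sketch: emit a struct member offset as an assembler-visible marker.  The
 * build compiles a file like this to assembly and scrapes the "->SYM offset"
 * markers out of the output into asm-offsets.h as #defines usable from .S.
 */
#include <stddef.h>

struct vcpu_vmx_example {			/* stand-in for struct vcpu_vmx */
	unsigned long other_state[16];
	unsigned long spec_ctrl;		/* the field vmenter.S needs */
};

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem)	DEFINE(sym, offsetof(struct str, mem))

void common(void)
{
	/* Emits a "->VMX_spec_ctrl <offset>" marker in the compiler's assembly
	 * output, which becomes "#define VMX_spec_ctrl <offset>". */
	OFFSET(VMX_spec_ctrl, vcpu_vmx_example, spec_ctrl);
}

In the hunk itself, the IS_ENABLED(CONFIG_KVM_INTEL) guard simply avoids emitting the constant for configurations that do not build the VMX code.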
arch/x86/kernel/cpu/bugs.c
@@ -196,8 +196,8 @@ void __init check_bugs(void)
 }

 /*
- * NOTE: For VMX, this function is not called in the vmexit path.
- * It uses vmx_spec_ctrl_restore_host() instead.
+ * NOTE: This function is *only* called for SVM.  VMX spec_ctrl handling is
+ * done in vmenter.S.
  */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
arch/x86/kvm/vmx/capabilities.h
@@ -4,8 +4,8 @@

 #include <asm/vmx.h>

-#include "lapic.h"
-#include "x86.h"
+#include "../lapic.h"
+#include "../x86.h"

 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;
arch/x86/kvm/vmx/vmenter.S
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
+#include <asm/percpu.h>
 #include <asm/segment.h>
 #include "run_flags.h"
@@ -73,6 +75,33 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	lea (%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp

+	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL
+
+	/*
+	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
+	 * host's, write the MSR.
+	 *
+	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
+	 * there must not be any returns or indirect branches between this code
+	 * and vmentry.
+	 */
+	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
+	movl VMX_spec_ctrl(%_ASM_DI), %edi
+	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
+	cmp %edi, %esi
+	je .Lspec_ctrl_done
+	mov $MSR_IA32_SPEC_CTRL, %ecx
+	xor %edx, %edx
+	mov %edi, %eax
+	wrmsr
+
+.Lspec_ctrl_done:
+
+	/*
+	 * Since vmentry is serializing on affected CPUs, there's no need for
+	 * an LFENCE to stop speculation from skipping the wrmsr.
+	 */
+
 	/* Load @regs to RAX. */
 	mov (%_ASM_SP), %_ASM_AX
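For readers who find the assembly dense, the sequence added above is roughly the following C logic (an illustrative sketch only; the point of the patch is precisely that the real code must stay in vmenter.S so the compiler cannot introduce a CALL/RET between the WRMSR and vmentry). The helper name vmx_write_guest_spec_ctrl is invented for this example; vmx->spec_ctrl, x86_spec_ctrl_current, this_cpu_read() and native_wrmsrl() are the existing kernel pieces the assembly mirrors.

/* Illustrative C rendering of the new vmenter.S sequence, not actual code. */
static void vmx_write_guest_spec_ctrl(struct vcpu_vmx *vmx)
{
	/* The asm compares and writes only the low 32 bits, with EDX = 0. */
	u64 guest = vmx->spec_ctrl;

	/* Skip the costly WRMSR when the guest and host values already match. */
	if (guest != this_cpu_read(x86_spec_ctrl_current))
		native_wrmsrl(MSR_IA32_SPEC_CTRL, guest);

	/*
	 * No LFENCE after the branch: vmentry is serializing on affected
	 * CPUs, so speculation cannot skip past the wrmsr.
	 */
}

In the assembly, @vmx is reloaded from the stack (2*WORD_SIZE(%_ASM_SP)) rather than taken from a register because the argument registers may already have been clobbered by the call to vmx_update_host_rsp.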
arch/x86/kvm/vmx/vmx.c
@@ -6988,14 +6988,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	kvm_wait_lapic_expire(vcpu);

-	/*
-	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
-	 * is no need to worry about the conditional branch over the wrmsr
-	 * being speculatively taken.
-	 */
-	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
 	vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
arch/x86/kvm/vmx/vmx.h
@@ -8,11 +8,11 @@
 #include <asm/intel_pt.h>

 #include "capabilities.h"
-#include "kvm_cache_regs.h"
+#include "../kvm_cache_regs.h"
 #include "posted_intr.h"
 #include "vmcs.h"
 #include "vmx_ops.h"
-#include "cpuid.h"
+#include "../cpuid.h"
 #include "run_flags.h"

 #define MSR_TYPE_R	1
arch/x86/kvm/vmx/vmx_ops.h
@@ -8,7 +8,7 @@
 #include "evmcs.h"
 #include "vmcs.h"
-#include "x86.h"
+#include "../x86.h"

 asmlinkage void vmread_error(unsigned long field, bool fault);
 __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,