Commit a633e41e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: assimilate nested_vmx_entry_failure() into nested_vmx_enter_non_root_mode()

Handling all VMExits due to failed consistency checks on VMEnter in
nested_vmx_enter_non_root_mode() consolidates all relevant code into
a single location, and removing nested_vmx_entry_failure() eliminates
a confusing function name and label.  For a VMEntry, "fail" and its
derivatives have a very specific meaning due to the different behavior
of a VMEnter VMFail versus VMExit, i.e. it wasn't obvious that
nested_vmx_entry_failure() handled VMExit scenarios.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7671ce21
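
Note: the VMFail/VMExit distinction the commit message relies on is easy to miss. A VMFail means the VMLAUNCH/VMRESUME instruction itself fails and is signalled to L1 only through RFLAGS (and the VM-instruction error field), with vmcs01 still current; a VMExit during VMEntry is instead delivered to L1 as a nested VMExit whose exit reason has bit 31 (VMX_EXIT_REASONS_FAILED_VMENTRY) set. The standalone C sketch below models just that contrast; struct vmcs12_model and the two helpers are hypothetical illustration, while the constants follow the Intel SDM and the kernel's defines.

#include <stdio.h>

#define RFLAGS_CF 0x01u                 /* VMfailInvalid: no current VMCS */
#define RFLAGS_ZF 0x40u                 /* VMfailValid: error code in VM-instruction error field */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000u  /* bit 31 of the exit reason */
#define EXIT_REASON_INVALID_STATE 33u   /* VM-entry failure: invalid guest state */
#define EXIT_REASON_MSR_LOAD_FAIL 34u   /* VM-entry failure: MSR loading */

/* Hypothetical stand-in for the vmcs12 fields the VMExit path fills in. */
struct vmcs12_model {
	unsigned int vm_exit_reason;
	unsigned long long exit_qualification;
};

/*
 * A VMFail: the VMLAUNCH/VMRESUME instruction itself fails.  Only RFLAGS
 * (and the VM-instruction error field) change; no VMExit is delivered and
 * vmcs01 remains the current VMCS.
 */
static unsigned int vmfail_valid(void)
{
	return RFLAGS_ZF;
}

/*
 * A VMExit during VMEntry: reflected to L1 as a VMExit whose reason has
 * bit 31 set.  For MSR-load failures the qualification is the 1-based
 * index of the MSR entry that failed to load.
 */
static void vmentry_fail_vmexit(struct vmcs12_model *vmcs12,
				unsigned int reason,
				unsigned long long qual)
{
	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = qual;
}

int main(void)
{
	struct vmcs12_model vmcs12 = {0, 0};

	printf("VMFail:  rflags |= %#x (no VMExit, vmcs01 stays current)\n",
	       vmfail_valid());

	vmentry_fail_vmexit(&vmcs12, EXIT_REASON_MSR_LOAD_FAIL, 1);
	printf("VMExit:  vm_exit_reason=%#x exit_qualification=%llu\n",
	       vmcs12.vm_exit_reason, vmcs12.exit_qualification);
	return 0;
}
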
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2048,9 +2048,6 @@ static inline bool is_nmi(u32 intr_info)
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 			      u32 exit_intr_info,
 			      unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-			struct vmcs12 *vmcs12,
-			u32 reason, unsigned long qualification);
 
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
@@ -12640,26 +12637,29 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }
 
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+				   struct vmcs12 *vmcs12);
+
 /*
- * If exit_qual is NULL, this is being called from state restore (either RSM
+ * If from_vmentry is false, this is being called from state restore (either RSM
  * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
  */
-static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
+static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+					  bool from_vmentry)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	bool from_vmentry = !!exit_qual;
-	u32 dummy_exit_qual;
 	bool evaluate_pending_interrupts;
-	int r = 0;
+	u32 exit_reason = EXIT_REASON_INVALID_STATE;
+	u32 exit_qual;
 
 	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
 		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
 	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
 		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
-	if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, exit_qual))
-		return EXIT_REASON_INVALID_STATE;
+	if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+		goto vmentry_fail_vmexit;
 
 	enter_guest_mode(vcpu);
 
@@ -12674,18 +12674,17 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
-	r = EXIT_REASON_INVALID_STATE;
-	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
+	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
 		goto fail;
 
 	if (from_vmentry) {
 		nested_get_vmcs12_pages(vcpu);
 
-		r = EXIT_REASON_MSR_LOAD_FAIL;
-		*exit_qual = nested_vmx_load_msr(vcpu,
-						 vmcs12->vm_entry_msr_load_addr,
-						 vmcs12->vm_entry_msr_load_count);
-		if (*exit_qual)
+		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+		exit_qual = nested_vmx_load_msr(vcpu,
+						vmcs12->vm_entry_msr_load_addr,
+						vmcs12->vm_entry_msr_load_count);
+		if (exit_qual)
 			goto fail;
 	} else {
 		/*
@@ -12723,12 +12722,28 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	 */
 	return 0;
 
+	/*
+	 * A failed consistency check that leads to a VMExit during L1's
+	 * VMEnter to L2 is a variation of a normal VMexit, as explained in
+	 * 26.7 "VM-entry failures during or after loading guest state".
+	 */
 fail:
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-	return r;
+
+	if (!from_vmentry)
+		return 1;
+
+vmentry_fail_vmexit:
+	load_vmcs12_host_state(vcpu, vmcs12);
+	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+	vmcs12->exit_qualification = exit_qual;
+	nested_vmx_succeed(vcpu);
+	if (enable_shadow_vmcs)
+		vmx->nested.sync_shadow_vmcs = true;
+	return 1;
 }
 
 /*
@@ -12740,7 +12755,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vmcs12 *vmcs12;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
-	u32 exit_qual;
 	int ret;
 
 	if (!nested_vmx_check_permission(vcpu))
@@ -12809,9 +12823,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 */
 
 	vmx->nested.nested_run_pending = 1;
-	ret = nested_vmx_enter_non_root_mode(vcpu, &exit_qual);
+	ret = nested_vmx_enter_non_root_mode(vcpu, true);
 	if (ret) {
-		nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
 		vmx->nested.nested_run_pending = 0;
 		return 1;
 	}
@@ -13609,25 +13622,6 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 	free_nested(to_vmx(vcpu));
 }
 
-/*
- * L1's failure to enter L2 is a subset of a normal exit, as explained in
- * 23.7 "VM-entry failures during or after loading guest state" (this also
- * lists the acceptable exit-reason and exit-qualification parameters).
- * It should only be called before L2 actually succeeded to run, and when
- * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
- */
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-			struct vmcs12 *vmcs12,
-			u32 reason, unsigned long qualification)
-{
-	load_vmcs12_host_state(vcpu, vmcs12);
-	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
-	vmcs12->exit_qualification = qualification;
-	nested_vmx_succeed(vcpu);
-	if (enable_shadow_vmcs)
-		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
-}
-
 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage)
@@ -14051,7 +14045,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 
 	if (vmx->nested.smm.guest_mode) {
 		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		ret = nested_vmx_enter_non_root_mode(vcpu, NULL);
+		ret = nested_vmx_enter_non_root_mode(vcpu, false);
 		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
@@ -14257,7 +14251,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	vmx->nested.dirty_vmcs12 = true;
-	ret = nested_vmx_enter_non_root_mode(vcpu, NULL);
+	ret = nested_vmx_enter_non_root_mode(vcpu, false);
 	if (ret)
 		return -EINVAL;
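
The subtle piece of the consolidated function is its error-path layout: a failure after entering guest mode unwinds at fail: and, only when called from VMLAUNCH/VMRESUME, falls through into vmentry_fail_vmexit:, which the pre-entry consistency checks jump to directly (there is nothing to unwind yet). A minimal standalone C sketch of that control flow, with hypothetical names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

static int enter_non_root_mode(bool from_vmentry, bool pre_fail, bool post_fail)
{
	if (from_vmentry && pre_fail)
		goto vmentry_fail_vmexit;	/* nothing to unwind yet */

	puts("entered guest mode (vmcs02 is current)");
	if (post_fail)
		goto fail;

	return 0;	/* success: L2 will run */

fail:
	puts("unwind: leave guest mode, switch back to vmcs01");
	if (!from_vmentry)
		return 1;	/* RSM / KVM_SET_NESTED_STATE: plain error */

vmentry_fail_vmexit:
	puts("emulate VMExit to L1 with the FAILED_VMENTRY bit set");
	return 1;
}

int main(void)
{
	enter_non_root_mode(true, true, false);		/* pre-entry check fails */
	enter_non_root_mode(true, false, true);		/* failure after entering */
	enter_non_root_mode(false, false, true);	/* state-restore failure */
	return 0;
}

Either way the function now returns nonzero on any failure, which is why nested_vmx_run() no longer needs nested_vmx_entry_failure() and simply clears nested_run_pending before returning.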