Commit b2656e4d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Wrap VM-Fail valid path in generic VM-Fail helper

Add nested_vmx_fail() to wrap VM-Fail paths that _may_ result in VM-Fail
Valid to make it clear at the call sites that the Valid flavor isn't
guaranteed.
Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200609015607.6994-1-sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c967118d
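
For context on what the helper consolidates, here is a minimal standalone sketch of the architectural VM-Fail semantics (illustrative C, not kernel code; fake_vmcs, vmx_fail and the rflags pointer are stand-ins for KVM's real state). Per the VMX instruction reference, VM-Fail Invalid sets RFLAGS.CF and is the only flavor available when no VMCS is current, since there is nowhere to record an error number; VM-Fail Valid sets RFLAGS.ZF and writes the error number to the VM-instruction error field of the current VMCS. nested_vmx_fail() in the diff below mirrors exactly this fallback: callers ask for the Valid flavor and transparently get Invalid when no vmcs12 is loaded.

    #include <stdint.h>
    #include <stddef.h>

    #define X86_EFLAGS_CF 0x0001ULL    /* set by VM-Fail Invalid */
    #define X86_EFLAGS_ZF 0x0040ULL    /* set by VM-Fail Valid */

    struct fake_vmcs {
            uint32_t vm_instruction_error;
    };

    /* Emulate a VMX instruction failure: prefer VM-Fail Valid, but fall
     * back to VM-Fail Invalid when no VMCS is current, because the error
     * number can only be recorded in a current VMCS. */
    static void vmx_fail(struct fake_vmcs *current_vmcs, uint32_t error,
                         uint64_t *rflags)
    {
            *rflags &= ~(X86_EFLAGS_CF | X86_EFLAGS_ZF);
            if (current_vmcs == NULL) {
                    *rflags |= X86_EFLAGS_CF;        /* VM-Fail Invalid */
                    return;
            }
            *rflags |= X86_EFLAGS_ZF;                /* VM-Fail Valid */
            current_vmcs->vm_instruction_error = error;
    }

The kernel helper performs the same check against KVM's real state: current_vmptr == -1ull with no enlightened VMCS means there is no current VMCS, so only the Invalid flavor is possible.
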
@@ -171,15 +171,6 @@ static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
 				u32 vm_instruction_error)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-	/*
-	 * failValid writes the error number to the current VMCS, which
-	 * can't be done if there isn't a current VMCS.
-	 */
-	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
-		return nested_vmx_failInvalid(vcpu);
-
 	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
 			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
 			    X86_EFLAGS_SF | X86_EFLAGS_OF))
@@ -192,6 +183,20 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
+static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * failValid writes the error number to the current VMCS, which
+	 * can't be done if there isn't a current VMCS.
+	 */
+	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+		return nested_vmx_failInvalid(vcpu);
+
+	return nested_vmx_failValid(vcpu, vm_instruction_error);
+}
+
 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
 {
 	/* TODO: not to reset guest simply here. */
@@ -3493,19 +3498,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * when using the merged vmcs02.
 	 */
 	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
 
 	if (vmcs12->launch_state == launch)
-		return nested_vmx_failValid(vcpu,
+		return nested_vmx_fail(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
 
 	if (nested_vmx_check_controls(vcpu, vmcs12))
-		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
 	if (nested_vmx_check_host_state(vcpu, vmcs12))
-		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 
 	/*
 	 * We're finally done with prerequisite checking, and can start with
@@ -3554,7 +3558,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (status == NVMX_VMENTRY_VMEXIT)
 		return 1;
 	WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
-	return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+	return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 }
 
 /*
@@ -4497,7 +4501,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 	 * flag and the VM-instruction error field of the VMCS
 	 * accordingly, and skip the emulated instruction.
 	 */
-	(void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+	(void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
 	/*
 	 * Restore L1's host state to KVM's software model. We're here
@@ -4797,8 +4801,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	}
 
 	if (vmx->nested.vmxon)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 
 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
 	    != VMXON_NEEDED_FEATURES) {
@@ -4889,12 +4892,10 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 		return r;
 
 	if (!page_address_valid(vcpu, vmptr))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
 
 	if (vmptr == vmx->nested.vmxon_ptr)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMCLEAR_VMXON_POINTER);
+		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
 
 	/*
 	 * When Enlightened VMEntry is enabled on the calling CPU we treat
@@ -4964,8 +4965,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
 	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
@@ -5068,8 +5068,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
 	/*
 	 * If the vCPU supports "VMWRITE to any supported field in the
@@ -5077,8 +5076,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 */
 	if (vmcs_field_readonly(field) &&
 	    !nested_cpu_has_vmwrite_any_field(vcpu))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+		return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
 	/*
 	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
@@ -5153,12 +5151,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		return r;
 
 	if (!page_address_valid(vcpu, vmptr))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
 
 	if (vmptr == vmx->nested.vmxon_ptr)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMPTRLD_VMXON_POINTER);
+		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
 
 	/* Forbid normal VMPTRLD if Enlightened version was used */
 	if (vmx->nested.hv_evmcs)
@@ -5175,7 +5171,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			 * given physical address won't match the required
 			 * VMCS12_REVISION identifier.
 			 */
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
 		}
 
@@ -5185,7 +5181,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		    (new_vmcs12->hdr.shadow_vmcs &&
 		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
 			kvm_vcpu_unmap(vcpu, &map, false);
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
 		}
@@ -5270,8 +5266,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
 	if (type >= 32 || !(types & (1 << type)))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+		return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 	/* According to the Intel VMX instruction reference, the memory
 	 * operand is read even if it isn't needed (e.g., for type==global)
@@ -5292,7 +5287,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	switch (type) {
 	case VMX_EPT_EXTENT_CONTEXT:
 		if (!nested_vmx_check_eptp(vcpu, operand.eptp))
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 		roots_to_free = 0;
@@ -5352,7 +5347,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
 
 	if (type >= 32 || !(types & (1 << type)))
-		return nested_vmx_failValid(vcpu,
+		return nested_vmx_fail(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 	/* according to the intel vmx instruction reference, the memory
@@ -5366,7 +5361,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return vmx_handle_memory_failure(vcpu, r, &e);
 
 	if (operand.vpid >> 16)
-		return nested_vmx_failValid(vcpu,
+		return nested_vmx_fail(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 	vpid02 = nested_get_vpid02(vcpu);
@@ -5374,14 +5369,14 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
 		if (!operand.vpid ||
 		    is_noncanonical_address(operand.gla, vcpu))
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 		vpid_sync_vcpu_addr(vpid02, operand.gla);
 		break;
 	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
 	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
 		if (!operand.vpid)
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 		vpid_sync_context(vpid02);
 		break;