Commit 5497b955 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: add tracepoint for failed nested VM-Enter

Debugging a failed VM-Enter is often like searching for a needle in a
haystack, e.g. there are over 80 consistency checks that funnel into
the "invalid control field" error code.  One way to expedite debug is
to run the buggy code as an L1 guest under KVM (and pray that the
failing check is detected by KVM).  However, extracting useful debug
information out of L0 KVM requires attaching a debugger to KVM and/or
modifying the source, e.g. to log which check is failing.

Make life a little less painful for VMM developers and add a tracepoint
for failed VM-Enter consistency checks.  Ideally the tracepoint would
capture both what check failed and precisely why it failed, but logging
why a check failed is difficult to do in a generic tracepoint without
resorting to invasive techniques, e.g. generating a custom string on
failure.  That being said, for the vast majority of VM-Enter failures
the most difficult step is figuring out exactly what to look at, e.g.
figuring out which bit was incorrectly set in a control field is usually
not too painful once the guilty field has been identified.

To reach a happy medium between precision and ease of use, simply log
the code that detected a failed check, using a macro to execute the
check and log the trace event on failure.  This approach enables tracing
arbitrary code, e.g. it's not limited to function calls or specific
formats of checks, and the changes to the existing code are minimally
invasive.  A macro with a two-character name is desirable as usage of
the macro doesn't result in overly long lines or confusing alignment,
while still retaining some amount of readability.  I.e. a one-character
name is a little too terse, and a three-character name results in the
contents being passed to the macro aligning with an indented line when
the macro is used in an if-statement, e.g.:

        if (VCC(nested_vmx_check_long_line_one(...) &&
                nested_vmx_check_long_line_two(...)))
                return -EINVAL;

And that is the story of how the CC(), a.k.a. Consistency Check, macro
got its name.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a061985b
...@@ -1475,6 +1475,24 @@ TRACE_EVENT(kvm_pv_tlb_flush, ...@@ -1475,6 +1475,24 @@ TRACE_EVENT(kvm_pv_tlb_flush,
__entry->need_flush_tlb ? "true" : "false") __entry->need_flush_tlb ? "true" : "false")
); );
/*
* Tracepoint for failed nested VMX VM-Enter.
*/
TRACE_EVENT(kvm_nested_vmenter_failed,
TP_PROTO(const char *msg),
TP_ARGS(msg),
TP_STRUCT__entry(
__field(const char *, msg)
),
TP_fast_assign(
__entry->msg = msg;
),
TP_printk("%s", __entry->msg)
);
#endif /* _TRACE_KVM_H */ #endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
......
...@@ -19,6 +19,14 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); ...@@ -19,6 +19,14 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
static bool __read_mostly nested_early_check = 0; static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO); module_param(nested_early_check, bool, S_IRUGO);
#define CC(consistency_check) \
({ \
bool failed = (consistency_check); \
if (failed) \
trace_kvm_nested_vmenter_failed(#consistency_check); \
failed; \
})
/* /*
* Hyper-V requires all of these, so mark them as supported even though * Hyper-V requires all of these, so mark them as supported even though
* they are just treated the same as all-context. * they are just treated the same as all-context.
...@@ -430,8 +438,8 @@ static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, ...@@ -430,8 +438,8 @@ static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return 0; return 0;
if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) || if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
!page_address_valid(vcpu, vmcs12->io_bitmap_b)) CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -443,7 +451,7 @@ static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, ...@@ -443,7 +451,7 @@ static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return 0; return 0;
if (!page_address_valid(vcpu, vmcs12->msr_bitmap)) if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -455,7 +463,7 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, ...@@ -455,7 +463,7 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
return 0; return 0;
if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)) if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -688,7 +696,7 @@ static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, ...@@ -688,7 +696,7 @@ static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
!page_address_valid(vcpu, vmcs12->apic_access_addr)) CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
return -EINVAL; return -EINVAL;
else else
return 0; return 0;
...@@ -707,16 +715,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, ...@@ -707,16 +715,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
* If virtualize x2apic mode is enabled, * If virtualize x2apic mode is enabled,
* virtualize apic access must be disabled. * virtualize apic access must be disabled.
*/ */
if (nested_cpu_has_virt_x2apic_mode(vmcs12) && if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
return -EINVAL; return -EINVAL;
/* /*
* If virtual interrupt delivery is enabled, * If virtual interrupt delivery is enabled,
* we must exit on external interrupts. * we must exit on external interrupts.
*/ */
if (nested_cpu_has_vid(vmcs12) && if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
!nested_exit_on_intr(vcpu))
return -EINVAL; return -EINVAL;
/* /*
...@@ -727,15 +734,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, ...@@ -727,15 +734,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
* bits 5:0 of posted_intr_desc_addr should be zero. * bits 5:0 of posted_intr_desc_addr should be zero.
*/ */
if (nested_cpu_has_posted_intr(vmcs12) && if (nested_cpu_has_posted_intr(vmcs12) &&
(!nested_cpu_has_vid(vmcs12) || (CC(!nested_cpu_has_vid(vmcs12)) ||
!nested_exit_intr_ack_set(vcpu) || CC(!nested_exit_intr_ack_set(vcpu)) ||
(vmcs12->posted_intr_nv & 0xff00) || CC((vmcs12->posted_intr_nv & 0xff00)) ||
(vmcs12->posted_intr_desc_addr & 0x3f) || CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
(vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
return -EINVAL; return -EINVAL;
/* tpr shadow is needed by all apicv features. */ /* tpr shadow is needed by all apicv features. */
if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -759,10 +766,12 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, ...@@ -759,10 +766,12 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count, if (CC(nested_vmx_check_msr_switch(vcpu,
vmcs12->vm_exit_msr_load_addr) || vmcs12->vm_exit_msr_load_count,
nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count, vmcs12->vm_exit_msr_load_addr)) ||
vmcs12->vm_exit_msr_store_addr)) CC(nested_vmx_check_msr_switch(vcpu,
vmcs12->vm_exit_msr_store_count,
vmcs12->vm_exit_msr_store_addr)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -771,8 +780,9 @@ static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, ...@@ -771,8 +780,9 @@ static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count, if (CC(nested_vmx_check_msr_switch(vcpu,
vmcs12->vm_entry_msr_load_addr)) vmcs12->vm_entry_msr_load_count,
vmcs12->vm_entry_msr_load_addr)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -784,8 +794,8 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, ...@@ -784,8 +794,8 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
if (!nested_cpu_has_pml(vmcs12)) if (!nested_cpu_has_pml(vmcs12))
return 0; return 0;
if (!nested_cpu_has_ept(vmcs12) || if (CC(!nested_cpu_has_ept(vmcs12)) ||
!page_address_valid(vcpu, vmcs12->pml_address)) CC(!page_address_valid(vcpu, vmcs12->pml_address)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -794,8 +804,8 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, ...@@ -794,8 +804,8 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) && if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
!nested_cpu_has_ept(vmcs12)) !nested_cpu_has_ept(vmcs12)))
return -EINVAL; return -EINVAL;
return 0; return 0;
} }
...@@ -803,8 +813,8 @@ static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, ...@@ -803,8 +813,8 @@ static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) && if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
!nested_cpu_has_ept(vmcs12)) !nested_cpu_has_ept(vmcs12)))
return -EINVAL; return -EINVAL;
return 0; return 0;
} }
...@@ -815,8 +825,8 @@ static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, ...@@ -815,8 +825,8 @@ static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
if (!nested_cpu_has_shadow_vmcs(vmcs12)) if (!nested_cpu_has_shadow_vmcs(vmcs12))
return 0; return 0;
if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) || if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)) CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -826,12 +836,12 @@ static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, ...@@ -826,12 +836,12 @@ static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e) struct vmx_msr_entry *e)
{ {
/* x2APIC MSR accesses are not allowed */ /* x2APIC MSR accesses are not allowed */
if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
return -EINVAL; return -EINVAL;
if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */ if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
e->index == MSR_IA32_UCODE_REV) CC(e->index == MSR_IA32_UCODE_REV))
return -EINVAL; return -EINVAL;
if (e->reserved != 0) if (CC(e->reserved != 0))
return -EINVAL; return -EINVAL;
return 0; return 0;
} }
...@@ -839,9 +849,9 @@ static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, ...@@ -839,9 +849,9 @@ static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e) struct vmx_msr_entry *e)
{ {
if (e->index == MSR_FS_BASE || if (CC(e->index == MSR_FS_BASE) ||
e->index == MSR_GS_BASE || CC(e->index == MSR_GS_BASE) ||
e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */ CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
nested_vmx_msr_check_common(vcpu, e)) nested_vmx_msr_check_common(vcpu, e))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -850,7 +860,7 @@ static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, ...@@ -850,7 +860,7 @@ static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e) struct vmx_msr_entry *e)
{ {
if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */ if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
nested_vmx_msr_check_common(vcpu, e)) nested_vmx_msr_check_common(vcpu, e))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -949,7 +959,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne ...@@ -949,7 +959,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
u32 *entry_failure_code) u32 *entry_failure_code)
{ {
if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
if (!nested_cr3_valid(vcpu, cr3)) { if (CC(!nested_cr3_valid(vcpu, cr3))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT; *entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL; return -EINVAL;
} }
...@@ -959,7 +969,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne ...@@ -959,7 +969,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
* must not be dereferenced. * must not be dereferenced.
*/ */
if (is_pae_paging(vcpu) && !nested_ept) { if (is_pae_paging(vcpu) && !nested_ept) {
if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
*entry_failure_code = ENTRY_FAIL_PDPTE; *entry_failure_code = ENTRY_FAIL_PDPTE;
return -EINVAL; return -EINVAL;
} }
...@@ -2405,12 +2415,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, ...@@ -2405,12 +2415,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{ {
if (!nested_cpu_has_nmi_exiting(vmcs12) && if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
nested_cpu_has_virtual_nmis(vmcs12)) nested_cpu_has_virtual_nmis(vmcs12)))
return -EINVAL; return -EINVAL;
if (!nested_cpu_has_virtual_nmis(vmcs12) && if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)) nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -2424,11 +2434,11 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) ...@@ -2424,11 +2434,11 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
/* Check for memory type validity */ /* Check for memory type validity */
switch (address & VMX_EPTP_MT_MASK) { switch (address & VMX_EPTP_MT_MASK) {
case VMX_EPTP_MT_UC: case VMX_EPTP_MT_UC:
if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)) if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
return false; return false;
break; break;
case VMX_EPTP_MT_WB: case VMX_EPTP_MT_WB:
if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)) if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
return false; return false;
break; break;
default: default:
...@@ -2436,16 +2446,16 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) ...@@ -2436,16 +2446,16 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
} }
/* only 4 levels page-walk length are valid */ /* only 4 levels page-walk length are valid */
if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) if (CC((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4))
return false; return false;
/* Reserved bits should not be set */ /* Reserved bits should not be set */
if (address >> maxphyaddr || ((address >> 7) & 0x1f)) if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f)))
return false; return false;
/* AD, if set, should be supported */ /* AD, if set, should be supported */
if (address & VMX_EPTP_AD_ENABLE_BIT) { if (address & VMX_EPTP_AD_ENABLE_BIT) {
if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)) if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
return false; return false;
} }
...@@ -2460,21 +2470,21 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, ...@@ -2460,21 +2470,21 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control, if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
vmx->nested.msrs.pinbased_ctls_low, vmx->nested.msrs.pinbased_ctls_low,
vmx->nested.msrs.pinbased_ctls_high) || vmx->nested.msrs.pinbased_ctls_high)) ||
!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.msrs.procbased_ctls_low, vmx->nested.msrs.procbased_ctls_low,
vmx->nested.msrs.procbased_ctls_high)) vmx->nested.msrs.procbased_ctls_high)))
return -EINVAL; return -EINVAL;
if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
!vmx_control_verify(vmcs12->secondary_vm_exec_control, CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
vmx->nested.msrs.secondary_ctls_low, vmx->nested.msrs.secondary_ctls_low,
vmx->nested.msrs.secondary_ctls_high)) vmx->nested.msrs.secondary_ctls_high)))
return -EINVAL; return -EINVAL;
if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) || if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
...@@ -2485,7 +2495,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, ...@@ -2485,7 +2495,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
return -EINVAL; return -EINVAL;
if (!nested_cpu_has_preemption_timer(vmcs12) && if (!nested_cpu_has_preemption_timer(vmcs12) &&
...@@ -2493,17 +2503,17 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, ...@@ -2493,17 +2503,17 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
return -EINVAL; return -EINVAL;
if (nested_cpu_has_ept(vmcs12) && if (nested_cpu_has_ept(vmcs12) &&
!valid_ept_address(vcpu, vmcs12->ept_pointer)) CC(!valid_ept_address(vcpu, vmcs12->ept_pointer)))
return -EINVAL; return -EINVAL;
if (nested_cpu_has_vmfunc(vmcs12)) { if (nested_cpu_has_vmfunc(vmcs12)) {
if (vmcs12->vm_function_control & if (CC(vmcs12->vm_function_control &
~vmx->nested.msrs.vmfunc_controls) ~vmx->nested.msrs.vmfunc_controls))
return -EINVAL; return -EINVAL;
if (nested_cpu_has_eptp_switching(vmcs12)) { if (nested_cpu_has_eptp_switching(vmcs12)) {
if (!nested_cpu_has_ept(vmcs12) || if (CC(!nested_cpu_has_ept(vmcs12)) ||
!page_address_valid(vcpu, vmcs12->eptp_list_address)) CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
return -EINVAL; return -EINVAL;
} }
} }
...@@ -2519,10 +2529,10 @@ static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, ...@@ -2519,10 +2529,10 @@ static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!vmx_control_verify(vmcs12->vm_exit_controls, if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
vmx->nested.msrs.exit_ctls_low, vmx->nested.msrs.exit_ctls_low,
vmx->nested.msrs.exit_ctls_high) || vmx->nested.msrs.exit_ctls_high)) ||
nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)) CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -2536,9 +2546,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, ...@@ -2536,9 +2546,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!vmx_control_verify(vmcs12->vm_entry_controls, if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
vmx->nested.msrs.entry_ctls_low, vmx->nested.msrs.entry_ctls_low,
vmx->nested.msrs.entry_ctls_high)) vmx->nested.msrs.entry_ctls_high)))
return -EINVAL; return -EINVAL;
/* /*
...@@ -2558,31 +2568,31 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, ...@@ -2558,31 +2568,31 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
/* VM-entry interruption-info field: interruption type */ /* VM-entry interruption-info field: interruption type */
if (intr_type == INTR_TYPE_RESERVED || if (CC(intr_type == INTR_TYPE_RESERVED) ||
(intr_type == INTR_TYPE_OTHER_EVENT && CC(intr_type == INTR_TYPE_OTHER_EVENT &&
!nested_cpu_supports_monitor_trap_flag(vcpu))) !nested_cpu_supports_monitor_trap_flag(vcpu)))
return -EINVAL; return -EINVAL;
/* VM-entry interruption-info field: vector */ /* VM-entry interruption-info field: vector */
if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
return -EINVAL; return -EINVAL;
/* VM-entry interruption-info field: deliver error code */ /* VM-entry interruption-info field: deliver error code */
should_have_error_code = should_have_error_code =
intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
x86_exception_has_error_code(vector); x86_exception_has_error_code(vector);
if (has_error_code != should_have_error_code) if (CC(has_error_code != should_have_error_code))
return -EINVAL; return -EINVAL;
/* VM-entry exception error code */ /* VM-entry exception error code */
if (has_error_code && if (CC(has_error_code &&
vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)))
return -EINVAL; return -EINVAL;
/* VM-entry interruption-info field: reserved bits */ /* VM-entry interruption-info field: reserved bits */
if (intr_info & INTR_INFO_RESVD_BITS_MASK) if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
return -EINVAL; return -EINVAL;
/* VM-entry instruction length */ /* VM-entry instruction length */
...@@ -2590,9 +2600,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, ...@@ -2590,9 +2600,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
case INTR_TYPE_SOFT_EXCEPTION: case INTR_TYPE_SOFT_EXCEPTION:
case INTR_TYPE_SOFT_INTR: case INTR_TYPE_SOFT_INTR:
case INTR_TYPE_PRIV_SW_EXCEPTION: case INTR_TYPE_PRIV_SW_EXCEPTION:
if ((vmcs12->vm_entry_instruction_len > 15) || if (CC(vmcs12->vm_entry_instruction_len > 15) ||
(vmcs12->vm_entry_instruction_len == 0 && CC(vmcs12->vm_entry_instruction_len == 0 &&
!nested_cpu_has_zero_length_injection(vcpu))) CC(!nested_cpu_has_zero_length_injection(vcpu))))
return -EINVAL; return -EINVAL;
} }
} }
...@@ -2619,40 +2629,40 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, ...@@ -2619,40 +2629,40 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
{ {
bool ia32e; bool ia32e;
if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
!nested_cr3_valid(vcpu, vmcs12->host_cr3)) CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
return -EINVAL; return -EINVAL;
if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) || if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)) CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
return -EINVAL; return -EINVAL;
if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
!kvm_pat_valid(vmcs12->host_ia32_pat)) CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
return -EINVAL; return -EINVAL;
ia32e = (vmcs12->vm_exit_controls & ia32e = (vmcs12->vm_exit_controls &
VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
if (vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) || CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
vmcs12->host_cs_selector == 0 || CC(vmcs12->host_cs_selector == 0) ||
vmcs12->host_tr_selector == 0 || CC(vmcs12->host_tr_selector == 0) ||
(vmcs12->host_ss_selector == 0 && !ia32e)) CC(vmcs12->host_ss_selector == 0 && !ia32e))
return -EINVAL; return -EINVAL;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
if (is_noncanonical_address(vmcs12->host_fs_base, vcpu) || if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
is_noncanonical_address(vmcs12->host_gs_base, vcpu) || CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
is_noncanonical_address(vmcs12->host_gdtr_base, vcpu) || CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
is_noncanonical_address(vmcs12->host_idtr_base, vcpu) || CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
is_noncanonical_address(vmcs12->host_tr_base, vcpu)) CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)))
return -EINVAL; return -EINVAL;
#endif #endif
...@@ -2663,9 +2673,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, ...@@ -2663,9 +2673,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
* the host address-space size VM-exit control. * the host address-space size VM-exit control.
*/ */
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
return -EINVAL; return -EINVAL;
} }
...@@ -2682,16 +2692,16 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, ...@@ -2682,16 +2692,16 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
if (vmcs12->vmcs_link_pointer == -1ull) if (vmcs12->vmcs_link_pointer == -1ull)
return 0; return 0;
if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)) if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
return -EINVAL; return -EINVAL;
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)) if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
return -EINVAL; return -EINVAL;
shadow = map.hva; shadow = map.hva;
if (shadow->hdr.revision_id != VMCS12_REVISION || if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)) CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
r = -EINVAL; r = -EINVAL;
kvm_vcpu_unmap(vcpu, &map, false); kvm_vcpu_unmap(vcpu, &map, false);
...@@ -2703,8 +2713,8 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, ...@@ -2703,8 +2713,8 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
*/ */
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{ {
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -2718,12 +2728,12 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, ...@@ -2718,12 +2728,12 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
*exit_qual = ENTRY_FAIL_DEFAULT; *exit_qual = ENTRY_FAIL_DEFAULT;
if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
return -EINVAL; return -EINVAL;
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
!kvm_pat_valid(vmcs12->guest_ia32_pat)) CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
return -EINVAL; return -EINVAL;
if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
...@@ -2743,16 +2753,16 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, ...@@ -2743,16 +2753,16 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
if (to_vmx(vcpu)->nested.nested_run_pending && if (to_vmx(vcpu)->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
((vmcs12->guest_cr0 & X86_CR0_PG) && CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
return -EINVAL; return -EINVAL;
} }
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
(vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
return -EINVAL; return -EINVAL;
if (nested_check_guest_non_reg_state(vmcs12)) if (nested_check_guest_non_reg_state(vmcs12))
......
...@@ -10128,6 +10128,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); ...@@ -10128,6 +10128,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment