Commit 2c1f3323 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Split VM-Exit reflection logic into L0 vs. L1 wants

Split the logic that determines whether a nested VM-Exit is reflected
into L1 into "L0 wants" and "L1 wants" to document the core control flow
at a high level.  If L0 wants the VM-Exit, e.g. because the exit is due
to a hardware event that isn't passed through to L1, then KVM should
handle the exit in L0 without considering L1's configuration.  Then, if
L0 doesn't want the exit, KVM needs to query L1's wants to determine
whether or not L1 "caused" the exit, e.g. by setting an exiting control,
versus the exit occurring due to an L0 setting, e.g. when L0 intercepts
an action that L1 chose to pass-through.

Note, this adds an extra read on vmcs.VM_EXIT_INTR_INFO for exceptions.
This will be addressed in a future patch via a VMX-wide enhancement,
rather than pile on another case where vmx->exit_intr_info is
conditionally available.
Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200415175519.14230-6-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 236871b6
...@@ -5642,34 +5642,85 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) ...@@ -5642,34 +5642,85 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
} }
/* /*
* Return true if we should exit from L2 to L1 to handle an exit, or false if we * Return true if L0 wants to handle an exit from L2 regardless of whether or not
* should handle it ourselves in L0 (and then continue L2). Only call this * L1 wants the exit. Only call this when in is_guest_mode (L2).
* when in is_guest_mode (L2).
*/ */
static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 intr_info; u32 intr_info;
switch (exit_reason) { switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI: case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmcs_read32(VM_EXIT_INTR_INFO); intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
if (is_nmi(intr_info)) if (is_nmi(intr_info))
return false; return true;
else if (is_page_fault(intr_info)) else if (is_page_fault(intr_info))
return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; return vcpu->arch.apf.host_apf_reason || !enable_ept;
else if (is_debug(intr_info) && else if (is_debug(intr_info) &&
vcpu->guest_debug & vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
return false; return true;
else if (is_breakpoint(intr_info) && else if (is_breakpoint(intr_info) &&
vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return true;
return false; return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
case EXIT_REASON_MCE_DURING_VMENTRY:
return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
* used, and the nested mmu code discovers that the address is
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
return true;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses directly L1's EPT, but rather L0's own EPT
* table (shadow on EPT) or a merged EPT table that L0 built
* (EPT on EPT). So any problems with the structure of the
* table is L0's fault.
*/
return true;
case EXIT_REASON_PREEMPTION_TIMER:
return true;
case EXIT_REASON_PML_FULL:
/* We emulate PML support to L1. */
return true;
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
return true;
case EXIT_REASON_ENCLS:
/* SGX is never exposed to L1 */
return true;
default:
break;
}
return false;
}
/*
* Return 1 if L1 wants to intercept an exit from L2. Only call this when in
* is_guest_mode (L2).
*/
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 intr_info;
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
if (is_nmi(intr_info))
return true;
else if (is_page_fault(intr_info))
return true;
return vmcs12->exception_bitmap & return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK)); (1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT: case EXIT_REASON_EXTERNAL_INTERRUPT:
return false; return nested_exit_on_intr(vcpu);
case EXIT_REASON_TRIPLE_FAULT: case EXIT_REASON_TRIPLE_FAULT:
return true; return true;
case EXIT_REASON_INTERRUPT_WINDOW: case EXIT_REASON_INTERRUPT_WINDOW:
...@@ -5734,7 +5785,7 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) ...@@ -5734,7 +5785,7 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
nested_cpu_has2(vmcs12, nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING); SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY: case EXIT_REASON_MCE_DURING_VMENTRY:
return false; return true;
case EXIT_REASON_TPR_BELOW_THRESHOLD: case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS: case EXIT_REASON_APIC_ACCESS:
...@@ -5746,22 +5797,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) ...@@ -5746,22 +5797,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
* delivery" only come from vmcs12. * delivery" only come from vmcs12.
*/ */
return true; return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
* used, and the nested mmu code discovers that the address is
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
return false;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses directly L1's EPT, but rather L0's own EPT
* table (shadow on EPT) or a merged EPT table that L0 built
* (EPT on EPT). So any problems with the structure of the
* table is L0's fault.
*/
return false;
case EXIT_REASON_INVPCID: case EXIT_REASON_INVPCID:
return return
nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
...@@ -5778,17 +5813,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) ...@@ -5778,17 +5813,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
* the XSS exit bitmap in vmcs12. * the XSS exit bitmap in vmcs12.
*/ */
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PREEMPTION_TIMER:
return false;
case EXIT_REASON_PML_FULL:
/* We emulate PML support to L1. */
return false;
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
return false;
case EXIT_REASON_ENCLS:
/* SGX is never exposed to L1 */
return false;
case EXIT_REASON_UMWAIT: case EXIT_REASON_UMWAIT:
case EXIT_REASON_TPAUSE: case EXIT_REASON_TPAUSE:
return nested_cpu_has2(vmcs12, return nested_cpu_has2(vmcs12,
...@@ -5830,7 +5854,12 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason) ...@@ -5830,7 +5854,12 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE), vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX); KVM_ISA_VMX);
if (!nested_vmx_exit_reflected(vcpu, exit_reason)) /* If L0 (KVM) wants the exit, it trumps L1's desires. */
if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
return false;
/* If L1 doesn't want the exit, handle it in L0. */
if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
return false; return false;
/* /*
...@@ -6162,7 +6191,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) ...@@ -6162,7 +6191,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
* reason is that if one of these bits is necessary, it will appear * reason is that if one of these bits is necessary, it will appear
* in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
* fields of vmcs01 and vmcs02, will turn these bits off - and * fields of vmcs01 and vmcs02, will turn these bits off - and
* nested_vmx_exit_reflected() will not pass related exits to L1. * nested_vmx_l1_wants_exit() will not pass related exits to L1.
* These rules have exceptions below. * These rules have exceptions below.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment