Commit 7b7bd87d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Uninline nested_vmx_reflect_vmexit(), i.e. move it to nested.c

Uninline nested_vmx_reflect_vmexit() in preparation for refactoring
nested_vmx_exit_reflected() to split up the reflection logic into more
consumable chunks, e.g. VM-Fail vs. L1 wants the exit vs. L0 always
handles the exit.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200415175519.14230-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 789afc5c
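As background for the refactor previewed in the commit message, here is a minimal, self-contained sketch of the three-way classification it describes (VM-Fail vs. L1 wants the exit vs. L0 always handles the exit). It is illustrative only: the enum, helper names, and exit-reason values below are stand-ins invented for this note, not KVM's definitions and not the actual follow-up patches.

/*
 * Illustrative only: a compilable model of the three-way split the commit
 * message previews.  Every name and exit-reason value is a stand-in, not a
 * real KVM/VMX definition.
 */
#include <stdbool.h>
#include <stdio.h>

enum reflect_result {
	HANDLED_BY_L0,		/* L0 must handle the exit itself */
	REFLECTED_TO_L1,	/* L1 asked for the exit, forward it */
	VMFAIL,			/* the nested VM-Enter itself failed */
};

/* Hypothetical helpers, one per question the planned refactor would carve out. */
static bool exit_is_vmfail(unsigned int exit_reason)
{
	return exit_reason & 0x80000000u;	/* stand-in "VM-Entry failure" flag */
}

static bool l0_always_handles(unsigned int exit_reason)
{
	return exit_reason == 1;	/* stand-in, e.g. an external interrupt */
}

static bool l1_wants_exit(unsigned int exit_reason)
{
	return exit_reason == 10;	/* stand-in for an exit L1 asked for */
}

static enum reflect_result classify_exit(unsigned int exit_reason)
{
	if (exit_is_vmfail(exit_reason))
		return VMFAIL;
	if (l0_always_handles(exit_reason))
		return HANDLED_BY_L0;
	if (l1_wants_exit(exit_reason))
		return REFLECTED_TO_L1;
	return HANDLED_BY_L0;	/* default: keep the exit in L0 */
}

int main(void)
{
	unsigned int reasons[] = { 1, 10, 0x80000021u };

	for (int i = 0; i < 3; i++)
		printf("exit reason %#x -> disposition %d\n",
		       reasons[i], classify_exit(reasons[i]));
	return 0;
}

The diff below only moves the existing helper out of the header; the split itself is left to later patches, hence "No functional change intended."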
arch/x86/kvm/vmx/nested.c
@@ -5646,7 +5646,7 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
  * should handle it ourselves in L0 (and then continue L2). Only call this
  * when in is_guest_mode (L2).
  */
-bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 {
 	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5813,6 +5813,38 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 	}
 }
 
+/*
+ * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
+ * reflected into L1.
+ */
+bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
+{
+	u32 exit_intr_info;
+
+	if (!nested_vmx_exit_reflected(vcpu, exit_reason))
+		return false;
+
+	/*
+	 * At this point, the exit interruption info in exit_intr_info
+	 * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
+	 * we need to query the in-kernel LAPIC.
+	 */
+	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
+	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	if ((exit_intr_info &
+	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
+	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
+		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+		vmcs12->vm_exit_intr_error_code =
+			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+	}
+
+	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
+			  vmcs_readl(EXIT_QUALIFICATION));
+	return true;
+}
+
 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 				struct kvm_nested_state __user *user_kvm_nested_state,
arch/x86/kvm/vmx/nested.h
@@ -25,7 +25,7 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 						     bool from_vmentry);
-bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
+bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason);
 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		       u32 exit_intr_info, unsigned long exit_qualification);
 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
@@ -80,40 +80,6 @@ static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
 	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
 }
 
-/*
- * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
- * reflected into L1.
- */
-static inline bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
-					     u32 exit_reason)
-{
-	u32 exit_intr_info;
-
-	if (!nested_vmx_exit_reflected(vcpu, exit_reason))
-		return false;
-
-	/*
-	 * At this point, the exit interruption info in exit_intr_info
-	 * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
-	 * we need to query the in-kernel LAPIC.
-	 */
-	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
-	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	if ((exit_intr_info &
-	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
-	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
-		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
-		vmcs12->vm_exit_intr_error_code =
-			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
-	}
-
-	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
-			  vmcs_readl(EXIT_QUALIFICATION));
-	return true;
-}
-
 /*
  * Return the cr0 value that a nested guest would read. This is a combination
  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by