Commit 5a9f5443 authored by Wanpeng Li, committed by Paolo Bonzini

KVM: X86: Introduce kvm_vcpu_exit_request() helper

Introduce the kvm_vcpu_exit_request() helper. Before re-entering the guest
immediately we need to check several conditions; if the fastpath completes
but something prevents an immediate re-entry, we skip invoking the exit
handler and go through the full run loop instead.
Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1588055009-12677-5-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2c4c4132
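
For illustration only (not part of this diff): a minimal sketch of how a VM-exit
fastpath handler might consult kvm_vcpu_exit_request() to decide between
re-entering the guest directly and falling back to the full run loop.
handle_fastpath_example() is hypothetical, and the fastpath_t return values
EXIT_FASTPATH_REENTER_GUEST / EXIT_FASTPATH_EXIT_HANDLED are assumed from
companion patches in this series rather than defined here.

	/* Hypothetical sketch, not part of this commit. */
	static fastpath_t handle_fastpath_example(struct kvm_vcpu *vcpu)
	{
		/* ... fastpath emulation of the exit reason would go here ... */

		/*
		 * Even though the exit itself was handled, only re-enter the
		 * guest immediately if no exit request (pending KVM request,
		 * resched, signal, or EXITING_GUEST_MODE) is outstanding;
		 * otherwise drop back into the full run loop.
		 */
		if (kvm_vcpu_exit_request(vcpu))
			return EXIT_FASTPATH_EXIT_HANDLED;

		return EXIT_FASTPATH_REENTER_GUEST;
	}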
arch/x86/kvm/x86.c
@@ -1573,6 +1573,13 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
+bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+{
+	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
+		need_resched() || signal_pending(current);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
+
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
  * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
@@ -8396,8 +8403,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
 		kvm_x86_ops.sync_pir_to_irr(vcpu);
 
-	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
-	    || need_resched() || signal_pending(current)) {
+	if (kvm_vcpu_exit_request(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
 		local_irq_enable();

arch/x86/kvm/x86.h
@@ -364,5 +364,6 @@ static inline bool kvm_dr7_valid(u64 data)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
 #endif