Commit 54aa83c9 authored by Paolo Bonzini

KVM: x86: do not set st->preempted when going back to user space

Similar to the Xen path, only change the vCPU's reported state if the vCPU
was actually preempted.  The rationale for KVM's old behavior is that, for
example, optimistic spinning might not be a good idea if the guest is doing
repeated exits to userspace; however, that behavior is confusing and unlikely
to make a difference, because well-tuned guests will hardly ever exit KVM_RUN
in the first place.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 11d39e8c
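
For context, the flag this patch stops setting on plain userspace exits is consumed by the guest's paravirtualized vcpu_is_preempted() hook. A minimal sketch of that consumer, simplified from the guest kernel's arch/x86/kernel/kvm.c (the paravirt registration plumbing is elided here):

/*
 * Guest-side consumer of st->preempted: the spinlock slow path uses
 * vcpu_is_preempted() to decide whether spinning on a lock held by
 * another vCPU is worthwhile.  A flag set on a mere return to
 * userspace, rather than a real preemption, would skew this heuristic.
 */
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
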
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4654,19 +4654,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
 
-	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
-		vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+	if (vcpu->preempted) {
+		if (!vcpu->arch.guest_state_protected)
+			vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
-	/*
-	 * Take the srcu lock as memslots will be accessed to check the gfn
-	 * cache generation against the memslots generation.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	if (kvm_xen_msr_enabled(vcpu->kvm))
-		kvm_xen_runstate_set_preempted(vcpu);
-	else
-		kvm_steal_time_set_preempted(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+		/*
+		 * Take the srcu lock as memslots will be accessed to check the gfn
+		 * cache generation against the memslots generation.
+		 */
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		if (kvm_xen_msr_enabled(vcpu->kvm))
+			kvm_xen_runstate_set_preempted(vcpu);
+		else
+			kvm_steal_time_set_preempted(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
 
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -159,8 +159,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
 	 * behalf of the vCPU. Only if the VMM does actually block
 	 * does it need to enter RUNSTATE_blocked.
 	 */
-	if (vcpu->preempted)
-		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
+	if (WARN_ON_ONCE(!vcpu->preempted))
+		return;
+
+	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
 }
 
 /* 32-bit compatibility definitions, also used natively in 32-bit build */