Commit d92a5d1c authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Add helpers to wake/query blocking vCPU

Add helpers to wake and query a blocking vCPU.  In addition to providing
nice names, the helpers reduce the probability of KVM neglecting to use
kvm_arch_vcpu_get_wait().

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-20-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cdafece4
...@@ -649,7 +649,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) ...@@ -649,7 +649,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{ {
struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map; struct timer_map map;
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
if (unlikely(!timer->enabled)) if (unlikely(!timer->enabled))
return; return;
...@@ -672,7 +671,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) ...@@ -672,7 +671,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
if (map.emul_ptimer) if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer); soft_timer_cancel(&map.emul_ptimer->hrtimer);
if (rcuwait_active(wait)) if (kvm_vcpu_is_blocking(vcpu))
kvm_timer_blocking(vcpu); kvm_timer_blocking(vcpu);
/* /*
......
...@@ -631,7 +631,7 @@ void kvm_arm_resume_guest(struct kvm *kvm) ...@@ -631,7 +631,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false; vcpu->arch.pause = false;
rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); __kvm_vcpu_wake_up(vcpu);
} }
} }
......
...@@ -1931,7 +1931,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) ...@@ -1931,7 +1931,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
/* If the preempt notifier has already run, it also called apic_timer_expired */ /* If the preempt notifier has already run, it also called apic_timer_expired */
if (!apic->lapic_timer.hv_timer_in_use) if (!apic->lapic_timer.hv_timer_in_use)
goto out; goto out;
WARN_ON(rcuwait_active(&vcpu->wait)); WARN_ON(kvm_vcpu_is_blocking(vcpu));
apic_timer_expired(apic, false); apic_timer_expired(apic, false);
cancel_hv_timer(apic); cancel_hv_timer(apic);
......
...@@ -1286,6 +1286,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) ...@@ -1286,6 +1286,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
#endif #endif
} }
/*
 * Wake the vCPU's rcuwait if it is blocking, without touching any stats or
 * other metadata.  Returns true if the vCPU was actually awakened, i.e. was
 * blocking at the time of the call, false otherwise.
 */
static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
	struct rcuwait *waitp = kvm_arch_vcpu_get_wait(vcpu);

	return rcuwait_wake_up(waitp) != 0;
}
/* Query whether the vCPU is currently blocking, i.e. its rcuwait is active. */
static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
	struct rcuwait *waitp = kvm_arch_vcpu_get_wait(vcpu);

	return rcuwait_active(waitp);
}
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/* /*
* returns true if the virtual interrupt controller is initialized and * returns true if the virtual interrupt controller is initialized and
......
...@@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) ...@@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, cr2_or_gpa); trace_kvm_async_pf_completed(addr, cr2_or_gpa);
rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); __kvm_vcpu_wake_up(vcpu);
mmput(mm); mmput(mm);
kvm_put_kvm(vcpu->kvm); kvm_put_kvm(vcpu->kvm);
......
...@@ -3403,10 +3403,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt); ...@@ -3403,10 +3403,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{ {
struct rcuwait *waitp; if (__kvm_vcpu_wake_up(vcpu)) {
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp)) {
WRITE_ONCE(vcpu->ready, true); WRITE_ONCE(vcpu->ready, true);
++vcpu->stat.generic.halt_wakeup; ++vcpu->stat.generic.halt_wakeup;
return true; return true;
...@@ -3574,8 +3571,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) ...@@ -3574,8 +3571,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue; continue;
if (vcpu == me) if (vcpu == me)
continue; continue;
if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) && if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
!vcpu_dy_runnable(vcpu))
continue; continue;
if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
!kvm_arch_dy_has_pending_interrupt(vcpu) && !kvm_arch_dy_has_pending_interrupt(vcpu) &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment