Commit ccf8d687 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Fold fallback path into triggering posted IRQ helper

Move the fallback "wake_up" path into the helper that triggers the posted
interrupt now that the nested and non-nested paths are identical.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211208015236.1616697-20-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 296aa266
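
In effect, each call site collapses from "trigger the posted interrupt, and wake the vCPU if that failed" into a single call, with the fallback wake-up folded into the helper. The sketch below is a minimal stand-alone model of that before/after pattern, not the real KVM/VMX code: the vcpu structure, the IPI send and the wake-up are illustrative stubs.

/*
 * Minimal stand-alone model of the refactor.  The vcpu struct, the IPI
 * send and the wake-up below are illustrative stubs, not KVM/VMX code.
 */
#include <stdbool.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct vcpu {
	enum vcpu_mode mode;
};

static void send_pi_ipi(struct vcpu *vcpu, int pi_vec)
{
	(void)vcpu;
	printf("send posted-interrupt IPI, vector %d\n", pi_vec);
}

static void wake_up(struct vcpu *vcpu)
{
	(void)vcpu;
	printf("wake vCPU in case it is blocking\n");
}

/* Before: the helper reports failure and every caller adds the wake-up. */
static bool trigger_old(struct vcpu *vcpu, int pi_vec)
{
	if (vcpu->mode == IN_GUEST_MODE) {
		send_pi_ipi(vcpu, pi_vec);
		return true;
	}
	return false;
}

/* After: the fallback wake-up lives inside the helper itself. */
static void trigger_new(struct vcpu *vcpu, int pi_vec)
{
	if (vcpu->mode == IN_GUEST_MODE) {
		send_pi_ipi(vcpu, pi_vec);
		return;
	}
	wake_up(vcpu);
}

int main(void)
{
	struct vcpu v = { .mode = OUTSIDE_GUEST_MODE };

	/* Old call-site pattern, duplicated in both delivery paths: */
	if (!trigger_old(&v, 1))
		wake_up(&v);

	/* New call-site pattern, a single call: */
	trigger_new(&v, 1);

	return 0;
}

Either way the observable behaviour is the same, which is why the commit can state "No functional change intended."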
@@ -3931,7 +3931,7 @@ static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
 	pt_update_intercept_for_msr(vcpu);
 }
 
-static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
 						     int pi_vec)
 {
 #ifdef CONFIG_SMP
@@ -3962,10 +3962,15 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
 		 */
 
 		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
-		return true;
+		return;
 	}
 #endif
-	return false;
+	/*
+	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
+	 * otherwise do nothing as KVM will grab the highest priority pending
+	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
+	 */
+	kvm_vcpu_wake_up(vcpu);
 }
 
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
@@ -3995,8 +4000,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 		smp_mb__after_atomic();
 
 		/* the PIR and ON have been set by L1. */
-		if (!kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR))
-			kvm_vcpu_wake_up(vcpu);
+		kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
 		return 0;
 	}
 	return -1;
@@ -4033,9 +4037,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
 	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
 	 */
-	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR))
-		kvm_vcpu_wake_up(vcpu);
-
+	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
 	return 0;
 }