Commit 1c96dcce authored by Paolo Bonzini

KVM: x86: fix apic_accept_events vs check_nested_events

vmx_apic_init_signal_blocked is buggy in that it returns true
even in VMX non-root mode.  In non-root mode, however, INITs
are not latched; they just cause a vmexit.  Previously,
KVM was waiting for them to be processed when kvm_apic_accept_events
was called, and in the meantime it ate the SIPIs that the processor received.

However, in order to implement the wait-for-SIPI activity state,
KVM will have to process KVM_APIC_SIPI in vmx_check_nested_events,
and it will no longer be possible to disregard SIPIs in non-root
mode as the code currently does.

By calling kvm_x86_ops.nested_ops->check_events, we can force a vmexit
(with the side-effect of latching INITs) before incorrectly injecting
an INIT or SIPI in a guest, and therefore vmx_apic_init_signal_blocked
can do the right thing.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7a873e45
...@@ -2843,14 +2843,35 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2843,14 +2843,35 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
u8 sipi_vector; u8 sipi_vector;
int r;
unsigned long pe; unsigned long pe;
if (!lapic_in_kernel(vcpu) || !apic->pending_events) if (!lapic_in_kernel(vcpu))
return;
/*
* Read pending events before calling the check_events
* callback.
*/
pe = smp_load_acquire(&apic->pending_events);
if (!pe)
return; return;
if (is_guest_mode(vcpu)) {
r = kvm_x86_ops.nested_ops->check_events(vcpu);
if (r < 0)
return;
/*
* If an event has happened and caused a vmexit,
* we know INITs are latched and therefore
* we will not incorrectly deliver an APIC
* event instead of a vmexit.
*/
}
/* /*
* INITs are latched while CPU is in specific states * INITs are latched while CPU is in specific states
* (SMM, VMX non-root mode, SVM with GIF=0). * (SMM, VMX root mode, SVM with GIF=0).
* Because a CPU cannot be in these states immediately * Because a CPU cannot be in these states immediately
* after it has processed an INIT signal (and thus in * after it has processed an INIT signal (and thus in
* KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
...@@ -2858,13 +2879,13 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2858,13 +2879,13 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
*/ */
if (kvm_vcpu_latch_init(vcpu)) { if (kvm_vcpu_latch_init(vcpu)) {
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
if (test_bit(KVM_APIC_SIPI, &apic->pending_events)) if (test_bit(KVM_APIC_SIPI, &pe))
clear_bit(KVM_APIC_SIPI, &apic->pending_events); clear_bit(KVM_APIC_SIPI, &apic->pending_events);
return; return;
} }
pe = xchg(&apic->pending_events, 0);
if (test_bit(KVM_APIC_INIT, &pe)) { if (test_bit(KVM_APIC_INIT, &pe)) {
clear_bit(KVM_APIC_INIT, &apic->pending_events);
kvm_vcpu_reset(vcpu, true); kvm_vcpu_reset(vcpu, true);
if (kvm_vcpu_is_bsp(apic->vcpu)) if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
...@@ -2873,6 +2894,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2873,6 +2894,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
} }
if (test_bit(KVM_APIC_SIPI, &pe) && if (test_bit(KVM_APIC_SIPI, &pe) &&
vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
clear_bit(KVM_APIC_SIPI, &apic->pending_events);
/* evaluate pending_events before reading the vector */ /* evaluate pending_events before reading the vector */
smp_rmb(); smp_rmb();
sipi_vector = apic->sipi_vector; sipi_vector = apic->sipi_vector;
......
...@@ -7558,7 +7558,7 @@ static void enable_smi_window(struct kvm_vcpu *vcpu) ...@@ -7558,7 +7558,7 @@ static void enable_smi_window(struct kvm_vcpu *vcpu)
static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{ {
return to_vmx(vcpu)->nested.vmxon; return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
} }
static void vmx_migrate_timers(struct kvm_vcpu *vcpu) static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment