Commit f5c694cf authored by Wanpeng Li's avatar Wanpeng Li Committed by Greg Kroah-Hartman

KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use

commit 56cc2406 upstream.

After commit 77b0f5d6 (KVM: nVMX: Ack and write vector info to intr_info
if L1 asks us to), "Acknowledge interrupt on exit" behavior can be
emulated. To do so, KVM will ask the APIC for the interrupt vector
during a nested vmexit if VM_EXIT_ACK_INTR_ON_EXIT is set.  With APICv,
kvm_get_apic_interrupt would return -1 and give the following WARNING:

Call Trace:
 [<ffffffff81493563>] dump_stack+0x49/0x5e
 [<ffffffff8103f0eb>] warn_slowpath_common+0x7c/0x96
 [<ffffffffa059709a>] ? nested_vmx_vmexit+0xa4/0x233 [kvm_intel]
 [<ffffffff8103f11a>] warn_slowpath_null+0x15/0x17
 [<ffffffffa059709a>] nested_vmx_vmexit+0xa4/0x233 [kvm_intel]
 [<ffffffffa0594295>] ? nested_vmx_exit_handled+0x6a/0x39e [kvm_intel]
 [<ffffffffa0537931>] ? kvm_apic_has_interrupt+0x80/0xd5 [kvm]
 [<ffffffffa05972ec>] vmx_check_nested_events+0xc3/0xd3 [kvm_intel]
 [<ffffffffa051ebe9>] inject_pending_event+0xd0/0x16e [kvm]
 [<ffffffffa051efa0>] vcpu_enter_guest+0x319/0x704 [kvm]

To fix this, we cannot rely on the processor's virtual interrupt delivery,
because "acknowledge interrupt on exit" must only update the virtual
ISR/PPR/IRR registers (and SVI, which is just a cache of the virtual ISR)
but it should not deliver the interrupt through the IDT.  Thus, KVM has
to deliver the interrupt "by hand", similar to the treatment of EOI in
commit fc57ac2c (KVM: lapic: sync highest ISR to hardware apic on
EOI, 2014-05-14).

The patch modifies kvm_cpu_get_interrupt to always acknowledge an
interrupt; there are only two callers, and the other is not affected
because it is never reached with kvm_apic_vid_enabled() == true.  Then it
modifies apic_set_isr and apic_clear_irr to update SVI and RVI in addition
to the registers.
Suggested-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
Suggested-by: default avatar"Zhang, Yang Z" <yang.z.zhang@intel.com>
Tested-by: default avatarLiu, RongrongX <rongrongx.liu@intel.com>
Tested-by: default avatarFelipe Reyes <freyes@suse.com>
Fixes: 77b0f5d6
Signed-off-by: default avatarWanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d376ef46
...@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) ...@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
vector = kvm_cpu_get_extint(v); vector = kvm_cpu_get_extint(v);
if (kvm_apic_vid_enabled(v->kvm) || vector != -1) if (vector != -1)
return vector; /* PIC */ return vector; /* PIC */
return kvm_get_apic_interrupt(v); /* APIC */ return kvm_get_apic_interrupt(v); /* APIC */
......
...@@ -352,17 +352,37 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic) ...@@ -352,17 +352,37 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{ {
apic->irr_pending = false; struct kvm_vcpu *vcpu;
vcpu = apic->vcpu;
apic_clear_vector(vec, apic->regs + APIC_IRR); apic_clear_vector(vec, apic->regs + APIC_IRR);
if (apic_search_irr(apic) != -1) if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
apic->irr_pending = true; /* try to update RVI */
kvm_make_request(KVM_REQ_EVENT, vcpu);
else {
vec = apic_search_irr(apic);
apic->irr_pending = (vec != -1);
}
} }
static inline void apic_set_isr(int vec, struct kvm_lapic *apic) static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{ {
/* Note that we never get here with APIC virtualization enabled. */ struct kvm_vcpu *vcpu;
if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
return;
if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) vcpu = apic->vcpu;
/*
* With APIC virtualization enabled, all caching is disabled
* because the processor can modify ISR under the hood. Instead
* just set SVI.
*/
if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
else {
++apic->isr_count; ++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR); BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/* /*
...@@ -371,6 +391,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) ...@@ -371,6 +391,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* the highest bit in ISR. * the highest bit in ISR.
*/ */
apic->highest_isr_cache = vec; apic->highest_isr_cache = vec;
}
} }
static inline int apic_find_highest_isr(struct kvm_lapic *apic) static inline int apic_find_highest_isr(struct kvm_lapic *apic)
...@@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) ...@@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
int vector = kvm_apic_has_interrupt(vcpu); int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
/* Note that we never get here with APIC virtualization enabled. */
if (vector == -1) if (vector == -1)
return -1; return -1;
/*
* We get here even with APIC virtualization enabled, if doing
* nested virtualization and L1 runs with the "acknowledge interrupt
* on exit" mode. Then we cannot inject the interrupt via RVI,
* because the process would deliver it through the IDT.
*/
apic_set_isr(vector, apic); apic_set_isr(vector, apic);
apic_update_ppr(apic); apic_update_ppr(apic);
apic_clear_irr(vector, apic); apic_clear_irr(vector, apic);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment