Commit c43203ca authored by Paolo Bonzini, committed by Radim Krčmář

KVM: x86: avoid simultaneous queueing of both IRQ and SMI

If the processor exits to KVM while delivering an interrupt,
the hypervisor then requeues the interrupt for the next vmentry.
Trying to enter SMM in this same window causes the vCPU to enter
non-root mode in emulated SMM (i.e. with IF=0) while also carrying a
request to inject an IRQ (i.e. a valid VM-entry interrupt info field).
This is invalid guest state (SDM 26.3.1.4 "Checks on Guest RIP
and RFLAGS") and the processor fails vmentry.
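
For reference, the consistency check that trips here can be paraphrased
with the following sketch (the function and argument names are
illustrative only, not KVM or SDM code; the bit layout is that of the
VM-entry interruption-information field and RFLAGS):

bool guest_rflags_check_passes(u32 entry_intr_info, u64 guest_rflags)
{
        bool valid = entry_intr_info & (1u << 31);   /* bit 31: valid */
        u32 type   = (entry_intr_info >> 8) & 0x7;   /* bits 10:8: type */
        bool intf  = guest_rflags & (1ull << 9);     /* RFLAGS.IF */

        /* Injecting an external interrupt (type 0) requires IF=1. */
        if (valid && type == 0 && !intf)
                return false;
        return true;
}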

The fix is to defer the injection from KVM_REQ_SMI to KVM_REQ_EVENT,
like we already do for e.g. NMIs.  This patch doesn't change the
name of the process_smi function so that it can be applied to
stable releases.  The next patch will modify the names so that
process_nmi and process_smi handle KVM_REQ_NMI and KVM_REQ_SMI,
respectively.
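
Condensed, the new flow looks like this (a sketch that only excerpts
the hunks in the diff below; surrounding code is elided):

/* KVM_REQ_SMI now only latches the SMI instead of entering SMM directly. */
static void process_smi_request(struct kvm_vcpu *vcpu)
{
        vcpu->arch.smi_pending = true;
        kvm_make_request(KVM_REQ_EVENT, vcpu);
}

static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
{
        /* ... re-inject any exception/NMI/IRQ left over from the last exit ... */

        /* Enter SMM only when no other injection is already in flight. */
        if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
                vcpu->arch.smi_pending = false;
                process_smi(vcpu);
        } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
                /* ... */
        }
        return 0;
}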

This is especially common with Windows, probably due to the
self-IPI trick that it uses to deliver deferred procedure
calls (DPCs).
Reported-by: Laszlo Ersek <lersek@redhat.com>
Reported-by: Michał Zegan <webczat_200@poczta.onet.pl>
Fixes: 64d60670
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 4340fa55
arch/x86/kvm/x86.c

@@ -91,6 +91,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
@@ -5302,13 +5303,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
                 /* This is a good place to trace that we are exiting SMM. */
                 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
 
-                if (unlikely(vcpu->arch.smi_pending)) {
-                        kvm_make_request(KVM_REQ_SMI, vcpu);
-                        vcpu->arch.smi_pending = 0;
-                } else {
-                        /* Process a latched INIT, if any. */
-                        kvm_make_request(KVM_REQ_EVENT, vcpu);
-                }
+                /* Process a latched INIT or SMI, if any. */
+                kvm_make_request(KVM_REQ_EVENT, vcpu);
         }
 
         kvm_mmu_reset_context(vcpu);
@@ -6108,7 +6104,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
         }
 
         /* try to inject new event if pending */
-        if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+        if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+                vcpu->arch.smi_pending = false;
+                process_smi(vcpu);
+        } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
                 --vcpu->arch.nmi_pending;
                 vcpu->arch.nmi_injected = true;
                 kvm_x86_ops->set_nmi(vcpu);
@@ -6318,11 +6317,6 @@ static void process_smi(struct kvm_vcpu *vcpu)
         char buf[512];
         u32 cr0;
 
-        if (is_smm(vcpu)) {
-                vcpu->arch.smi_pending = true;
-                return;
-        }
-
         trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
         vcpu->arch.hflags |= HF_SMM_MASK;
         memset(buf, 0, 512);
@@ -6385,6 +6379,12 @@ static void process_smi(struct kvm_vcpu *vcpu)
         kvm_mmu_reset_context(vcpu);
 }
 
+static void process_smi_request(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.smi_pending = true;
+        kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
+
 void kvm_make_scan_ioapic_request(struct kvm *kvm)
 {
         kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
@@ -6506,7 +6506,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                         record_steal_time(vcpu);
                 if (kvm_check_request(KVM_REQ_SMI, vcpu))
-                        process_smi(vcpu);
+                        process_smi_request(vcpu);
                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
                         process_nmi(vcpu);
                 if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -6579,8 +6579,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
                 if (inject_pending_event(vcpu, req_int_win) != 0)
                         req_immediate_exit = true;
-                /* enable NMI/IRQ window open exits if needed */
                 else {
+                        /* Enable NMI/IRQ window open exits if needed.
+                         *
+                         * SMIs have two cases: 1) they can be nested, and
+                         * then there is nothing to do here because RSM will
+                         * cause a vmexit anyway; 2) or the SMI can be pending
+                         * because inject_pending_event has completed the
+                         * injection of an IRQ or NMI from the previous vmexit,
+                         * and then we request an immediate exit to inject the SMI.
+                         */
+                        if (vcpu->arch.smi_pending && !is_smm(vcpu))
+                                req_immediate_exit = true;
                         if (vcpu->arch.nmi_pending)
                                 kvm_x86_ops->enable_nmi_window(vcpu);
                         if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
@@ -6631,8 +6641,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
         kvm_load_guest_xcr0(vcpu);
 
-        if (req_immediate_exit)
+        if (req_immediate_exit) {
+                kvm_make_request(KVM_REQ_EVENT, vcpu);
                 smp_send_reschedule(vcpu->cpu);
+        }
 
         trace_kvm_entry(vcpu->vcpu_id);
         wait_lapic_expire(vcpu);
@@ -7433,6 +7445,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
         vcpu->arch.hflags = 0;
 
+        vcpu->arch.smi_pending = 0;
         atomic_set(&vcpu->arch.nmi_queued, 0);
         vcpu->arch.nmi_pending = 0;
         vcpu->arch.nmi_injected = false;