Commit a1c77abb authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Properly handle userspace interrupt window request

Return true for vmx_interrupt_allowed() if the vCPU is in L2 and L1 has
external interrupt exiting enabled.  IRQs are never blocked in hardware
if the CPU is in the guest (L2 from L1's perspective) when IRQs trigger
VM-Exit.
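
Distilled, the resulting check looks like this (taken from the vmx.c hunk below;
the block comment is explanatory and not part of the patch):

        static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
        {
                if (to_vmx(vcpu)->nested.nested_run_pending)
                        return false;

                /*
                 * An IRQ that arrives while L2 is running with
                 * "external-interrupt exiting" set in vmcs12 causes a VM-Exit
                 * regardless of L2's RFLAGS.IF or interruptibility state, so
                 * from L0's perspective an interrupt can always be delivered
                 * (as a nested VM-Exit to L1).
                 */
                if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
                        return true;

                return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                        !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                                (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
        }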

The new check percolates up to kvm_vcpu_ready_for_interrupt_injection()
and thus vcpu_run(), and so KVM will exit to userspace if userspace has
requested an interrupt window (to inject an IRQ into L1).
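
For reference, the path that consumes the new return value looks roughly like
this (paraphrased from x86.c as of this commit, not part of the patch; details
simplified):

        static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
        {
                return kvm_arch_interrupt_allowed(vcpu) &&     /* -> vmx_interrupt_allowed() */
                        !kvm_cpu_has_interrupt(vcpu) &&
                        !kvm_event_needs_reinjection(vcpu) &&
                        kvm_cpu_accept_dm_intr(vcpu);
        }

        /* vcpu_run() loop, simplified: userspace asked for an interrupt window */
        if (dm_request_for_irq_injection(vcpu) &&
            kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                /* ... return to userspace so it can inject the IRQ into L1 */
        }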

Remove the @external_intr param from vmx_check_nested_events(), which is
actually an indicator that userspace wants an interrupt window, e.g.
it's named @req_int_win further up the stack.  Injecting a VM-Exit into
L1 to try and bounce out to L0 userspace is all kinds of broken and is
no longer necessary.

Remove the hack in nested_vmx_vmexit() that attempted to workaround the
breakage in vmx_check_nested_events() by only filling interrupt info if
there's an actual interrupt pending.  The hack actually made things
worse because it caused KVM to _never_ fill interrupt info when the
LAPIC resides in userspace (kvm_cpu_has_interrupt() queries
interrupt.injected, which is always cleared by prepare_vmcs12() before
reaching the hack in nested_vmx_vmexit()).
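
The userspace-LAPIC behavior referenced above comes from kvm_cpu_has_interrupt()
(paraphrased from arch/x86/kvm/irq.c; only the case relevant here is shown):

        int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
        {
                /*
                 * With a userspace LAPIC, "has an interrupt" reduces to the
                 * injected flag, which prepare_vmcs12() has already cleared by
                 * the time nested_vmx_vmexit() reached the old hack, so
                 * vm_exit_intr_info was never filled in that configuration.
                 */
                if (!lapic_in_kernel(v))
                        return v->arch.interrupt.injected;
                /* ... in-kernel LAPIC cases elided ... */
        }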

Fixes: 6550c4df ("KVM: nVMX: Fix interrupt window request with "Acknowledge interrupt on exit"")
Cc: stable@vger.kernel.org
Cc: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b34de572
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1180,7 +1180,7 @@ struct kvm_x86_ops {
         bool (*pt_supported)(void);
         bool (*pku_supported)(void);
 
-        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+        int (*check_nested_events)(struct kvm_vcpu *vcpu);
         void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
 
         void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3603,7 +3603,7 @@ static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
                             vcpu->arch.exception.payload);
 }
 
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         unsigned long exit_qual;
@@ -3679,8 +3679,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
                 return 0;
         }
 
-        if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
-            nested_exit_on_intr(vcpu)) {
+        if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
                 if (block_nested_events)
                         return -EBUSY;
                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
@@ -4328,17 +4327,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
         if (likely(!vmx->fail)) {
-                /*
-                 * TODO: SDM says that with acknowledge interrupt on
-                 * exit, bit 31 of the VM-exit interrupt information
-                 * (valid interrupt) is always set to 1 on
-                 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
-                 * need kvm_cpu_has_interrupt(). See the commit
-                 * message for details.
-                 */
-                if (nested_exit_intr_ack_set(vcpu) &&
-                    exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
-                    kvm_cpu_has_interrupt(vcpu)) {
+                if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+                    nested_exit_intr_ack_set(vcpu)) {
                         int irq = kvm_cpu_get_interrupt(vcpu);
                         WARN_ON(irq < 0);
                         vmcs12->vm_exit_intr_info = irq |
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4493,8 +4493,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-        return (!to_vmx(vcpu)->nested.nested_run_pending &&
-                vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+        if (to_vmx(vcpu)->nested.nested_run_pending)
+                return false;
+
+        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+                return true;
+
+        return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7579,7 +7579,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+static int inject_pending_event(struct kvm_vcpu *vcpu)
 {
         int r;
 
@@ -7615,7 +7615,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
          * from L2 to L1.
          */
         if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
-                r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+                r = kvm_x86_ops->check_nested_events(vcpu);
                 if (r != 0)
                         return r;
         }
@@ -7677,7 +7677,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
          * KVM_REQ_EVENT only on certain events and not unconditionally?
          */
         if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
-                r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+                r = kvm_x86_ops->check_nested_events(vcpu);
                 if (r != 0)
                         return r;
         }
@@ -8210,7 +8210,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                         goto out;
                 }
 
-                if (inject_pending_event(vcpu, req_int_win) != 0)
+                if (inject_pending_event(vcpu) != 0)
                         req_immediate_exit = true;
                 else {
                         /* Enable SMI/NMI/IRQ window open exits if needed.
@@ -8438,7 +8438,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 {
         if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
-                kvm_x86_ops->check_nested_events(vcpu, false);
+                kvm_x86_ops->check_nested_events(vcpu);
 
         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                 !vcpu->arch.apf.halted);