Commit 863e8e65 authored by Gleb Natapov, committed by Avi Kivity

KVM: VMX: Consolidate userspace and kernel interrupt injection for VMX

Use the same callback to inject irq/nmi events no matter what irqchip is
in use. Only for VMX for now.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 8061823a
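
The mechanics: vcpu_enter_guest() dispatches event injection through two kvm_x86_ops callbacks, inject_pending_irq (in-kernel irqchip) and inject_pending_vectors (userspace irqchip). After this patch both VMX entries point at the same handler, vmx_intr_assist(), which now also takes the kvm_run pointer that the userspace path needs. A minimal sketch of the resulting wiring, abbreviated from the hunks below:

	struct kvm_x86_ops {
		/* ... */
		void (*inject_pending_irq)(struct kvm_vcpu *vcpu, struct kvm_run *run);
		void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, struct kvm_run *run);
		/* ... */
	};

	static struct kvm_x86_ops vmx_x86_ops = {
		/* ... */
		.inject_pending_irq = vmx_intr_assist,		/* in-kernel irqchip path */
		.inject_pending_vectors = vmx_intr_assist,	/* userspace irqchip path */
		/* ... */
	};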
@@ -517,7 +517,7 @@ struct kvm_x86_ops {
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code);
 	bool (*exception_injected)(struct kvm_vcpu *vcpu);
-	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
+	void (*inject_pending_irq)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
 				       struct kvm_run *run);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
...
@@ -2298,7 +2298,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 		(svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
-static void svm_intr_assist(struct kvm_vcpu *vcpu)
+static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
...
@@ -2510,48 +2510,6 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 	return vcpu->arch.interrupt_window_open;
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
-				  struct kvm_run *kvm_run)
-{
-	vmx_update_window_states(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-				GUEST_INTR_STATE_STI |
-				GUEST_INTR_STATE_MOV_SS);
-
-	if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-		if (vcpu->arch.interrupt.pending) {
-			enable_nmi_window(vcpu);
-		} else if (vcpu->arch.nmi_window_open) {
-			vcpu->arch.nmi_pending = false;
-			vcpu->arch.nmi_injected = true;
-		} else {
-			enable_nmi_window(vcpu);
-			return;
-		}
-	}
-	if (vcpu->arch.nmi_injected) {
-		vmx_inject_nmi(vcpu);
-		if (vcpu->arch.nmi_pending)
-			enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu) ||
-			 kvm_run->request_interrupt_window)
-			enable_irq_window(vcpu);
-		return;
-	}
-
-	if (vcpu->arch.interrupt_window_open) {
-		if (kvm_cpu_has_interrupt(vcpu) && !vcpu->arch.interrupt.pending)
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-
-		if (vcpu->arch.interrupt.pending)
-			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-	} else if (kvm_cpu_has_interrupt(vcpu) ||
-		   kvm_run->request_interrupt_window)
-		enable_irq_window(vcpu);
-}
-
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
@@ -3351,8 +3309,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	}
 }
 
-static void vmx_intr_assist(struct kvm_vcpu *vcpu)
+static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+		kvm_run->request_interrupt_window;
+
 	update_tpr_threshold(vcpu);
 
 	vmx_update_window_states(vcpu);
@@ -3373,25 +3334,25 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 			return;
 		}
 	}
 
 	if (vcpu->arch.nmi_injected) {
 		vmx_inject_nmi(vcpu);
-		if (vcpu->arch.nmi_pending)
-			enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu))
-			enable_irq_window(vcpu);
-		return;
+		goto out;
 	}
 
 	if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
 		if (vcpu->arch.interrupt_window_open)
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-		else
-			enable_irq_window(vcpu);
 	}
 
-	if (vcpu->arch.interrupt.pending) {
+	if (vcpu->arch.interrupt.pending)
 		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		if (kvm_cpu_has_interrupt(vcpu))
-			enable_irq_window(vcpu);
-	}
+
+out:
+	if (vcpu->arch.nmi_pending)
+		enable_nmi_window(vcpu);
+	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		enable_irq_window(vcpu);
 }
 
 /*
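
Read together, the new-side lines above give vmx_intr_assist() the following shape (a sketch assembled from the hunks; the pending-exception and NMI-pending bookkeeping between the two hunks is elided):

	static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	{
		/* A userspace interrupt-window request only matters when the
		 * irqchip is emulated in userspace. */
		bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
			kvm_run->request_interrupt_window;

		update_tpr_threshold(vcpu);
		vmx_update_window_states(vcpu);

		/* ... elided: pending-exception and NMI-pending handling ... */

		if (vcpu->arch.nmi_injected) {
			vmx_inject_nmi(vcpu);	/* finish a previously started NMI injection */
			goto out;
		}

		if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
			if (vcpu->arch.interrupt_window_open)
				kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
		}

		if (vcpu->arch.interrupt.pending)
			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);

	out:
		/* Re-arm window exits for whatever could not be injected this round. */
		if (vcpu->arch.nmi_pending)
			enable_nmi_window(vcpu);
		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
			enable_irq_window(vcpu);
	}

The single exit path at out: is what makes the consolidation work: window re-arming no longer depends on whether the kernel or userspace irqchip requested the injection, so the separate do_interrupt_requests() variant can go.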
@@ -3733,7 +3694,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.queue_exception = vmx_queue_exception,
 	.exception_injected = vmx_exception_injected,
 	.inject_pending_irq = vmx_intr_assist,
-	.inject_pending_vectors = do_interrupt_requests,
+	.inject_pending_vectors = vmx_intr_assist,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
...
@@ -3173,7 +3173,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->arch.exception.pending)
 		__queue_exception(vcpu);
 	else if (irqchip_in_kernel(vcpu->kvm))
-		kvm_x86_ops->inject_pending_irq(vcpu);
+		kvm_x86_ops->inject_pending_irq(vcpu, kvm_run);
 	else
 		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
...
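
With vmx_x86_ops routing both callbacks to vmx_intr_assist(), the branch above is redundant on VMX; it survives only because, per the commit message, SVM is not consolidated yet (svm_intr_assist() merely gains the kvm_run parameter to keep the callback signatures uniform). A hypothetical follow-up, once every backend is unified, could collapse the call site to:

	if (vcpu->arch.exception.pending)
		__queue_exception(vcpu);
	else
		/* hypothetical simplification, not part of this patch */
		kvm_x86_ops->inject_pending_irq(vcpu, kvm_run);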