Commit 66fd3f7f authored by Gleb Natapov, committed by Avi Kivity

KVM: Do not re-execute INTn instruction.

Re-inject the event instead. This is what Intel suggests. Also use the correct
instruction length when re-injecting a soft fault/interrupt.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent f629cf84
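
In short: on VMX, injecting a software interrupt or software exception (INTn, INT3, INTO) at VM entry requires the instruction's length in VM_ENTRY_INSTRUCTION_LEN so the processor can push the correct return RIP. The patch therefore captures VM_EXIT_INSTRUCTION_LEN into a new event_exit_inst_len field when delivery of such an event is interrupted by an exit, and replays it on re-injection instead of rewinding RIP and re-executing the INTn. A condensed sketch of the VMX path, assembled from the hunks below (soft-interrupt case only, not a buildable excerpt):

/* Exit side (vmx_complete_interrupts): the interrupted event was a software
 * interrupt, so remember how long the INTn instruction was and queue the
 * vector for re-injection with the new "soft" flag. */
vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_queue_interrupt(&vmx->vcpu, vector, true);

/* Entry side (vmx_inject_irq): re-inject as INTR_TYPE_SOFT_INTR and hand the
 * saved length back to the CPU via VM_ENTRY_INSTRUCTION_LEN. */
intr = vcpu->arch.interrupt.nr | INTR_INFO_VALID_MASK | INTR_TYPE_SOFT_INTR;
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);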
@@ -319,6 +319,8 @@ struct kvm_vcpu_arch {
 	struct kvm_pio_request pio;
 	void *pio_data;
 
+	u8 event_exit_inst_len;
+
 	struct kvm_queued_exception {
 		bool pending;
 		bool has_error_code;
@@ -328,6 +330,7 @@ struct kvm_vcpu_arch {
 
 	struct kvm_queued_interrupt {
 		bool pending;
+		bool soft;
 		u8 nr;
 	} interrupt;
 
@@ -510,7 +513,7 @@ struct kvm_x86_ops {
 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
 				unsigned char *hypercall_addr);
-	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+	void (*set_irq)(struct kvm_vcpu *vcpu);
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code);
@@ -2310,13 +2310,13 @@ static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
 
-static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
+static void svm_set_irq(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	nested_svm_intr(svm);
 
-	svm_queue_irq(vcpu, irq);
+	svm_queue_irq(vcpu, vcpu->arch.interrupt.nr);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -2418,7 +2418,7 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	case SVM_EXITINTINFO_TYPE_EXEPT:
 		/* In case of software exception do not reinject an exception
 		   vector, but re-execute and instruction instead */
-		if (vector == BP_VECTOR || vector == OF_VECTOR)
+		if (kvm_exception_is_soft(vector))
 			break;
 		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
 			u32 err = svm->vmcb->control.exit_int_info_err;
@@ -2428,7 +2428,7 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 			kvm_queue_exception(&svm->vcpu, vector);
 		break;
 	case SVM_EXITINTINFO_TYPE_INTR:
-		kvm_queue_interrupt(&svm->vcpu, vector);
+		kvm_queue_interrupt(&svm->vcpu, vector, false);
 		break;
 	default:
 		break;
@@ -801,8 +801,9 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 		return;
 	}
 
-	if (nr == BP_VECTOR || nr == OF_VECTOR) {
-		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
+	if (kvm_exception_is_soft(nr)) {
+		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+			     vmx->vcpu.arch.event_exit_inst_len);
 		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
 	} else
 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
@@ -2445,9 +2446,11 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
-static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
+static void vmx_inject_irq(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	uint32_t intr;
+	int irq = vcpu->arch.interrupt.nr;
 
 	KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
@@ -2462,8 +2465,14 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
 		return;
 	}
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-			irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+	intr = irq | INTR_INFO_VALID_MASK;
+	if (vcpu->arch.interrupt.soft) {
+		intr |= INTR_TYPE_SOFT_INTR;
+		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+			     vmx->vcpu.arch.event_exit_inst_len);
+	} else
+		intr |= INTR_TYPE_EXT_INTR;
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3024,6 +3033,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 				      GUEST_INTR_STATE_NMI);
 			break;
 		case INTR_TYPE_EXT_INTR:
+		case INTR_TYPE_SOFT_INTR:
 			kvm_clear_interrupt_queue(vcpu);
 			break;
 		case INTR_TYPE_HARD_EXCEPTION:
@@ -3295,16 +3305,24 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
 				GUEST_INTR_STATE_NMI);
 		break;
-	case INTR_TYPE_HARD_EXCEPTION:
 	case INTR_TYPE_SOFT_EXCEPTION:
+		vmx->vcpu.arch.event_exit_inst_len =
+			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+		/* fall through */
+	case INTR_TYPE_HARD_EXCEPTION:
 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
 			u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
 			kvm_queue_exception_e(&vmx->vcpu, vector, err);
 		} else
 			kvm_queue_exception(&vmx->vcpu, vector);
 		break;
+	case INTR_TYPE_SOFT_INTR:
+		vmx->vcpu.arch.event_exit_inst_len =
+			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+		/* fall through */
 	case INTR_TYPE_EXT_INTR:
-		kvm_queue_interrupt(&vmx->vcpu, vector);
+		kvm_queue_interrupt(&vmx->vcpu, vector,
+			type == INTR_TYPE_SOFT_INTR);
 		break;
 	default:
 		break;
@@ -1441,7 +1441,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -ENXIO;
 	vcpu_load(vcpu);
 
-	kvm_queue_interrupt(vcpu, irq->irq);
+	kvm_queue_interrupt(vcpu, irq->irq, false);
 
 	vcpu_put(vcpu);
@@ -3161,7 +3161,7 @@ static void inject_irq(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->arch.interrupt.pending) {
-		kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
+		kvm_x86_ops->set_irq(vcpu);
 		return;
 	}
@@ -3174,8 +3174,9 @@ static void inject_irq(struct kvm_vcpu *vcpu)
 		}
 	} else if (kvm_cpu_has_interrupt(vcpu)) {
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-			kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
+			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
+					    false);
+			kvm_x86_ops->set_irq(vcpu);
 		}
 	}
 }
@@ -4098,7 +4099,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	pending_vec = find_first_bit(
 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
 	if (pending_vec < max_bits) {
-		kvm_queue_interrupt(vcpu, pending_vec);
+		kvm_queue_interrupt(vcpu, pending_vec, false);
 		pr_debug("Set back pending irq %d\n", pending_vec);
 		if (irqchip_in_kernel(vcpu->kvm))
 			kvm_pic_clear_isr_ack(vcpu->kvm);
@@ -8,9 +8,11 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
 	vcpu->arch.exception.pending = false;
 }
 
-static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector)
+static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
+	bool soft)
 {
 	vcpu->arch.interrupt.pending = true;
+	vcpu->arch.interrupt.soft = soft;
 	vcpu->arch.interrupt.nr = vector;
 }
@@ -24,4 +26,9 @@ static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
 	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
 		vcpu->arch.nmi_injected;
 }
+
+static inline bool kvm_exception_is_soft(unsigned int nr)
+{
+	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
+}
 #endif