Commit f2485b3e authored by Paolo Bonzini

KVM: x86: use guest_exit_irqoff

This gains a few clock cycles per vmexit.  On Intel there is no longer
any need to enable interrupts in vmx_handle_external_intr, since we are
using the "acknowledge interrupt on exit" feature.  AMD still needs to
enable them, and must be careful to avoid the interrupt shadow.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 91fa0f8e
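
The saved cycles come from the context-tracking side: guest_exit() is an
irq-safe wrapper that disables interrupts around the real work, so calling
it from a path that already runs with interrupts off pays for a redundant
flags save/restore on every exit.  A minimal sketch of the wrapper as it
looked around this time (from include/linux/context_tracking.h):

	static inline void guest_exit(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* redundant if the caller is already irqs-off */
		guest_exit_irqoff();		/* the actual vtime/context-tracking work */
		local_irq_restore(flags);
	}

Calling guest_exit_irqoff() directly, while interrupts are still disabled
from the vmexit, skips that save/restore pair.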
arch/x86/kvm/svm.c
@@ -4935,6 +4935,12 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 {
 	local_irq_enable();
+	/*
+	 * We must have an instruction with interrupts enabled, so
+	 * the timer interrupt isn't delayed by the interrupt shadow.
+	 */
+	asm("nop");
+	local_irq_disable();
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
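The comment in the new SVM code refers to the x86 STI interrupt shadow: on
bare metal local_irq_enable() is an sti instruction, and the CPU keeps
interrupts blocked until the instruction following sti has executed.
Roughly, the sequence above compiles to:

	sti	/* interrupts stay blocked for one more instruction (STI shadow) */
	nop	/* the shadow expires here; a pending timer IRQ is taken now */
	cli	/* local_irq_disable(): irqs off again for the rest of the exit */

With back-to-back sti; cli there would be no instruction outside the
shadow, so the interrupt window would never open and a pending timer
interrupt would be delayed until the next interrupts-enabled region.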
arch/x86/kvm/vmx.c
@@ -8574,7 +8574,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 			"push %[sp]\n\t"
 #endif
 			"pushf\n\t"
-			"orl $0x200, (%%" _ASM_SP ")\n\t"
 			__ASM_SIZE(push) " $%c[cs]\n\t"
 			"call *%[entry]\n\t"
 			:
@@ -8587,8 +8586,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 			[ss]"i"(__KERNEL_DS),
 			[cs]"i"(__KERNEL_CS)
 			);
-	} else
-		local_irq_enable();
+	}
 }
 
 static bool vmx_has_high_real_mode_segbase(void)
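In the asm block, 0x200 is X86_EFLAGS_IF.  The sequence emulates hardware
interrupt delivery for the already-acknowledged vector: pushf and the CS
push build a synthetic interrupt frame, call pushes the return RIP, and
the host handler's final iret unwinds all three.  Or-ing IF into the saved
RFLAGS made that iret return with interrupts enabled; now that the exit
path stays irqs-off throughout, the saved flags are left untouched, and
the else branch that enabled interrupts when no external interrupt was
pending is dropped as well.  An illustrative view of the frame (64-bit
offsets, not part of the patch):

	/*
	 * stack as seen by the handler after "call *%[entry]":
	 *
	 *	sp+16	RFLAGS	from pushf; IF no longer forced to 1
	 *	sp+8	CS	__KERNEL_CS
	 *	sp+0	RIP	return address pushed by call
	 *
	 * the handler's iret pops RIP, CS and RFLAGS, so execution resumes
	 * after the call with interrupts still disabled.
	 */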
arch/x86/kvm/x86.c
@@ -6709,16 +6709,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	++vcpu->stat.exits;
 
-	/*
-	 * We must have an instruction between local_irq_enable() and
-	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
-	 * the interrupt shadow.  The stat.exits increment will do nicely.
-	 * But we need to prevent reordering, hence this barrier():
-	 */
-	barrier();
-
-	guest_exit();
+	guest_exit_irqoff();
 
+	local_irq_enable();
 	preempt_enable();
 
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
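With guest_exit_irqoff() callable under irqs-off, the stat.exits increment
no longer has to double as the one interrupts-enabled instruction, so the
barrier() and the comment justifying it go away; the interrupt window is
now opened explicitly by the new local_irq_enable() (and, on SVM, inside
svm_handle_external_intr() above).  The resulting tail of
vcpu_enter_guest(), roughly and with unrelated lines elided:

	kvm_x86_ops->handle_external_intr(vcpu);	/* on VMX this now leaves irqs off */

	++vcpu->stat.exits;

	guest_exit_irqoff();	/* accounting without guest_exit()'s save/restore */

	local_irq_enable();	/* first point where host interrupts can fire */
	preempt_enable();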