Commit a9ab13ff authored by Wanpeng Li, committed by Paolo Bonzini

KVM: X86: Improve latency for single target IPI fastpath

Per observation in our cloud environment, IPI and timer writes cause the bulk of
MSR-write vmexits. Let's optimize virtual IPI latency more aggressively and
inject the target IPI as soon as possible.

Running the kvm-unit-tests/vmexit.flat IPI test on an SKX server, with adaptive
advance lapic timer and adaptive halt-polling disabled to avoid interference,
this patch gives another 7% improvement.

w/o fastpath   -> x86.c fastpath      4238 -> 3543  16.4%
x86.c fastpath -> vmx.c fastpath      3543 -> 3293     7%
w/o fastpath   -> vmx.c fastpath      4238 -> 3293  22.3%
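For context, the fastpath helper these hunks call, handle_fastpath_set_msr_irqoff(), is not
part of this diff; it was added by the earlier single-target IPI fastpath patch. The sketch
below is a simplified illustration, not the verbatim kernel code (the function name
fastpath_icr_write_sketch is made up for this example): on a WRMSR exit it roughly checks
for a FIXED + PHYSICAL, single-target x2APIC ICR write and, if so, delivers the IPI
immediately while interrupts are still disabled.

	/*
	 * Simplified sketch (not the verbatim kernel code) of the fastpath helper
	 * this series builds on: on an MSR-write exit, if the guest wrote a
	 * FIXED + PHYSICAL single-target x2APIC ICR value, deliver the IPI right
	 * here with IRQs still off and tell the caller it may skip full emulation.
	 */
	static enum exit_fastpath_completion fastpath_icr_write_sketch(struct kvm_vcpu *vcpu)
	{
		u32 msr = kvm_rcx_read(vcpu);		/* WRMSR index lives in RCX */
		u64 data = kvm_read_edx_eax(vcpu);	/* EDX:EAX holds the value */

		if (msr != (APIC_BASE_MSR + (APIC_ICR >> 4)))
			return EXIT_FASTPATH_NONE;	/* only the x2APIC ICR qualifies */

		if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) ||
		    (data & APIC_DEST_MASK) != APIC_DEST_PHYSICAL ||
		    (data & APIC_MODE_MASK) != APIC_DM_FIXED)
			return EXIT_FASTPATH_NONE;	/* anything else takes the slow path */

		/* Send the IPI now; low dword is the command, high dword the target. */
		kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32));

		return EXIT_FASTPATH_SKIP_EMUL_INS;	/* WRMSR handled, just skip it */
	}

In this patch, vmx_vcpu_run() and svm_vcpu_run() call the real helper right after VM-exit
and return the result through the new run() return value, instead of deferring the check to
handle_exit_irqoff(); that earlier dispatch is where the extra ~7% comes from.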

Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200410174703.1138-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 873e1da1
@@ -1126,7 +1126,7 @@ struct kvm_x86_ops {
 	 */
 	void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);
 
-	void (*run)(struct kvm_vcpu *vcpu);
+	enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
 	int (*handle_exit)(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion exit_fastpath);
 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -1176,8 +1176,7 @@ struct kvm_x86_ops {
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage,
 			       struct x86_exception *exception);
-	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
-				   enum exit_fastpath_completion *exit_fastpath);
+	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
 	int (*check_nested_events)(struct kvm_vcpu *vcpu);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
......
@@ -3299,10 +3299,21 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 	svm_complete_interrupts(svm);
 }
 
+static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+	if (!is_guest_mode(vcpu) &&
+	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+	    to_svm(vcpu)->vmcb->control.exit_info_1)
+		return handle_fastpath_set_msr_irqoff(vcpu);
+
+	return EXIT_FASTPATH_NONE;
+}
+
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
-static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
+	enum exit_fastpath_completion exit_fastpath;
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
@@ -3314,7 +3325,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * again.
 	 */
 	if (unlikely(svm->nested.exit_required))
-		return;
+		return EXIT_FASTPATH_NONE;
 
 	/*
 	 * Disable singlestep if we're injecting an interrupt/exception.
@@ -3398,6 +3409,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	stgi();
 
 	/* Any pending NMI will happen here */
+	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_after_interrupt(&svm->vcpu);
@@ -3426,6 +3438,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		svm_handle_mce(svm);
 
 	mark_all_clean(svm->vmcb);
+	return exit_fastpath;
 }
 
 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3727,13 +3740,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
-static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
-	enum exit_fastpath_completion *exit_fastpath)
+static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
-	if (!is_guest_mode(vcpu) &&
-	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
-	    to_svm(vcpu)->vmcb->control.exit_info_1)
-		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
......
@@ -6350,8 +6350,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 }
 STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff);
 
-static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
-	enum exit_fastpath_completion *exit_fastpath)
+static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -6359,9 +6358,6 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
 		handle_external_interrupt_irqoff(vcpu);
 	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
 		handle_exception_nmi_irqoff(vmx);
-	else if (!is_guest_mode(vcpu) &&
-		 vmx->exit_reason == EXIT_REASON_MSR_WRITE)
-		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
 static bool vmx_has_emulated_msr(int index)
@@ -6565,8 +6561,9 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
+	enum exit_fastpath_completion exit_fastpath;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
@@ -6578,7 +6575,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/* Don't enter VMX if guest state is invalid, let the exit handler
 	   start emulation until we arrive back to a valid state */
 	if (vmx->emulation_required)
-		return;
+		return EXIT_FASTPATH_NONE;
 
 	if (vmx->ple_window_dirty) {
 		vmx->ple_window_dirty = false;
@@ -6726,7 +6723,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(vmx->fail)) {
 		vmx->exit_reason = 0xdead;
-		return;
+		return EXIT_FASTPATH_NONE;
 	}
 
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -6734,13 +6731,20 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		kvm_machine_check();
 
 	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
-		return;
+		return EXIT_FASTPATH_NONE;
+
+	if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
+		exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
+	else
+		exit_fastpath = EXIT_FASTPATH_NONE;
 
 	vmx->loaded_vmcs->launched = 1;
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
+
+	return exit_fastpath;
 }
 
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
......
@@ -8179,7 +8179,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_int_win =
 		dm_request_for_irq_injection(vcpu) &&
 		kvm_cpu_accept_dm_intr(vcpu);
-	enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
+	enum exit_fastpath_completion exit_fastpath;
 
 	bool req_immediate_exit = false;
@@ -8406,7 +8406,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	kvm_x86_ops.run(vcpu);
+	exit_fastpath = kvm_x86_ops.run(vcpu);
 
 	/*
 	 * Do this here before restoring debug registers on the host. And
@@ -8438,7 +8438,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_x86_ops.handle_exit_irqoff(vcpu, &exit_fastpath);
+	kvm_x86_ops.handle_exit_irqoff(vcpu);
 
 	/*
 	 * Consume any pending interrupts, including the possible source of