Commit 404d5d7b authored by Wanpeng Li's avatar Wanpeng Li Committed by Paolo Bonzini

KVM: X86: Introduce more exit_fastpath_completion enum values

Adds a fastpath_t typedef since the enum lines are a bit long, and replaces
EXIT_FASTPATH_SKIP_EMUL_INS with two new exit_fastpath_completion enum values.

- EXIT_FASTPATH_EXIT_HANDLED  kvm will still go through its full run loop,
                              but it would skip invoking the exit handler.

- EXIT_FASTPATH_REENTER_GUEST complete fastpath, guest can be re-entered
                              without invoking the exit handler or going
                              back to vcpu_run
Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1588055009-12677-4-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5a9f5443
...@@ -182,8 +182,10 @@ enum { ...@@ -182,8 +182,10 @@ enum {
enum exit_fastpath_completion { enum exit_fastpath_completion {
EXIT_FASTPATH_NONE, EXIT_FASTPATH_NONE,
EXIT_FASTPATH_SKIP_EMUL_INS, EXIT_FASTPATH_REENTER_GUEST,
EXIT_FASTPATH_EXIT_HANDLED,
}; };
typedef enum exit_fastpath_completion fastpath_t;
struct x86_emulate_ctxt; struct x86_emulate_ctxt;
struct x86_exception; struct x86_exception;
......
...@@ -2893,8 +2893,7 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) ...@@ -2893,8 +2893,7 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
*info2 = control->exit_info_2; *info2 = control->exit_info_2;
} }
static int handle_exit(struct kvm_vcpu *vcpu, static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
enum exit_fastpath_completion exit_fastpath)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_run *kvm_run = vcpu->run; struct kvm_run *kvm_run = vcpu->run;
...@@ -2952,10 +2951,10 @@ static int handle_exit(struct kvm_vcpu *vcpu, ...@@ -2952,10 +2951,10 @@ static int handle_exit(struct kvm_vcpu *vcpu,
__func__, svm->vmcb->control.exit_int_info, __func__, svm->vmcb->control.exit_int_info,
exit_code); exit_code);
if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) { if (exit_fastpath != EXIT_FASTPATH_NONE)
kvm_skip_emulated_instruction(vcpu);
return 1; return 1;
} else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) { || !svm_exit_handlers[exit_code]) {
vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code); vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
dump_vmcb(vcpu); dump_vmcb(vcpu);
...@@ -3324,7 +3323,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) ...@@ -3324,7 +3323,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
svm_complete_interrupts(svm); svm_complete_interrupts(svm);
} }
static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{ {
if (!is_guest_mode(vcpu) && if (!is_guest_mode(vcpu) &&
to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
...@@ -3336,9 +3335,9 @@ static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu ...@@ -3336,9 +3335,9 @@ static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs); void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu) static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{ {
enum exit_fastpath_completion exit_fastpath; fastpath_t exit_fastpath;
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
......
...@@ -5926,8 +5926,7 @@ void dump_vmcs(void) ...@@ -5926,8 +5926,7 @@ void dump_vmcs(void)
* The guest has exited. See if we can fix it or if we need userspace * The guest has exited. See if we can fix it or if we need userspace
* assistance. * assistance.
*/ */
static int vmx_handle_exit(struct kvm_vcpu *vcpu, static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
enum exit_fastpath_completion exit_fastpath)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason = vmx->exit_reason; u32 exit_reason = vmx->exit_reason;
...@@ -6034,10 +6033,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, ...@@ -6034,10 +6033,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
} }
} }
if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) { if (exit_fastpath != EXIT_FASTPATH_NONE)
kvm_skip_emulated_instruction(vcpu);
return 1; return 1;
}
if (exit_reason >= kvm_vmx_max_exit_handlers) if (exit_reason >= kvm_vmx_max_exit_handlers)
goto unexpected_vmexit; goto unexpected_vmexit;
...@@ -6628,7 +6625,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) ...@@ -6628,7 +6625,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
} }
} }
static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{ {
switch (to_vmx(vcpu)->exit_reason) { switch (to_vmx(vcpu)->exit_reason) {
case EXIT_REASON_MSR_WRITE: case EXIT_REASON_MSR_WRITE:
...@@ -6640,12 +6637,13 @@ static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu ...@@ -6640,12 +6637,13 @@ static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
{ {
enum exit_fastpath_completion exit_fastpath; fastpath_t exit_fastpath;
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4; unsigned long cr3, cr4;
reenter_guest:
/* Record the guest's net vcpu time for enforced NMI injections. */ /* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi && if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked)) vmx->loaded_vmcs->soft_vnmi_blocked))
...@@ -6807,6 +6805,18 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -6807,6 +6805,18 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE; return EXIT_FASTPATH_NONE;
exit_fastpath = vmx_exit_handlers_fastpath(vcpu); exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
if (!kvm_vcpu_exit_request(vcpu)) {
/*
* FIXME: this goto should be a loop in vcpu_enter_guest,
* but it would incur the cost of a retpoline for now.
* Revisit once static calls are available.
*/
goto reenter_guest;
}
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
}
return exit_fastpath; return exit_fastpath;
} }
......
...@@ -1608,27 +1608,28 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data ...@@ -1608,27 +1608,28 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
return 1; return 1;
} }
enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{ {
u32 msr = kvm_rcx_read(vcpu); u32 msr = kvm_rcx_read(vcpu);
u64 data; u64 data;
int ret = 0; fastpath_t ret = EXIT_FASTPATH_NONE;
switch (msr) { switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4): case APIC_BASE_MSR + (APIC_ICR >> 4):
data = kvm_read_edx_eax(vcpu); data = kvm_read_edx_eax(vcpu);
ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data); if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
kvm_skip_emulated_instruction(vcpu);
ret = EXIT_FASTPATH_EXIT_HANDLED;
}
break; break;
default: default:
return EXIT_FASTPATH_NONE; break;
} }
if (!ret) { if (ret != EXIT_FASTPATH_NONE)
trace_kvm_msr_write(msr, data); trace_kvm_msr_write(msr, data);
return EXIT_FASTPATH_SKIP_EMUL_INS;
}
return EXIT_FASTPATH_NONE; return ret;
} }
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
...@@ -8205,7 +8206,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -8205,7 +8206,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
bool req_int_win = bool req_int_win =
dm_request_for_irq_injection(vcpu) && dm_request_for_irq_injection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu); kvm_cpu_accept_dm_intr(vcpu);
enum exit_fastpath_completion exit_fastpath; fastpath_t exit_fastpath;
bool req_immediate_exit = false; bool req_immediate_exit = false;
......
...@@ -274,7 +274,7 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -274,7 +274,7 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
bool kvm_vector_hashing_enabled(void); bool kvm_vector_hashing_enabled(void);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len); int emulation_type, void *insn, int insn_len);
enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu); fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
extern u64 host_xcr0; extern u64 host_xcr0;
extern u64 supported_xcr0; extern u64 supported_xcr0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment