Commit 42cbf068 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move #GP injection for VMware into x86_emulate_instruction()

Immediately inject a #GP when VMware emulation fails and return
EMULATE_DONE instead of propagating EMULATE_FAIL up the stack.  This
helps pave the way for removing EMULATE_FAIL altogether.

Rename EMULTYPE_VMWARE to EMULTYPE_VMWARE_GP to document that the x86
emulator is called to handle VMware #GP interception, e.g. why a #GP
is injected on emulation failure for EMULTYPE_VMWARE_GP.

Drop EMULTYPE_NO_UD_ON_FAIL as a standalone type.  The "no #UD on fail"
behavior is used only in the VMware case and is obsoleted by having the
emulator itself reinject #GP.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a6c6ed1e
...@@ -1325,8 +1325,7 @@ enum emulation_result { ...@@ -1325,8 +1325,7 @@ enum emulation_result {
#define EMULTYPE_TRAP_UD (1 << 1) #define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2) #define EMULTYPE_SKIP (1 << 2)
#define EMULTYPE_ALLOW_RETRY (1 << 3) #define EMULTYPE_ALLOW_RETRY (1 << 3)
#define EMULTYPE_NO_UD_ON_FAIL (1 << 4) #define EMULTYPE_VMWARE_GP (1 << 5)
#define EMULTYPE_VMWARE (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void *insn, int insn_len); void *insn, int insn_len);
......
...@@ -2768,7 +2768,6 @@ static int gp_interception(struct vcpu_svm *svm) ...@@ -2768,7 +2768,6 @@ static int gp_interception(struct vcpu_svm *svm)
{ {
struct kvm_vcpu *vcpu = &svm->vcpu; struct kvm_vcpu *vcpu = &svm->vcpu;
u32 error_code = svm->vmcb->control.exit_info_1; u32 error_code = svm->vmcb->control.exit_info_1;
int er;
WARN_ON_ONCE(!enable_vmware_backdoor); WARN_ON_ONCE(!enable_vmware_backdoor);
...@@ -2780,13 +2779,8 @@ static int gp_interception(struct vcpu_svm *svm) ...@@ -2780,13 +2779,8 @@ static int gp_interception(struct vcpu_svm *svm)
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
return 1; return 1;
} }
er = kvm_emulate_instruction(vcpu, return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP) !=
EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); EMULATE_USER_EXIT;
if (er == EMULATE_USER_EXIT)
return 0;
else if (er != EMULATE_DONE)
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return 1;
} }
static bool is_erratum_383(void) static bool is_erratum_383(void)
......
...@@ -4522,7 +4522,6 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) ...@@ -4522,7 +4522,6 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
u32 intr_info, ex_no, error_code; u32 intr_info, ex_no, error_code;
unsigned long cr2, rip, dr6; unsigned long cr2, rip, dr6;
u32 vect_info; u32 vect_info;
enum emulation_result er;
vect_info = vmx->idt_vectoring_info; vect_info = vmx->idt_vectoring_info;
intr_info = vmx->exit_intr_info; intr_info = vmx->exit_intr_info;
...@@ -4549,13 +4548,8 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) ...@@ -4549,13 +4548,8 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
return 1; return 1;
} }
er = kvm_emulate_instruction(vcpu, return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP) !=
EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); EMULATE_USER_EXIT;
if (er == EMULATE_USER_EXIT)
return 0;
else if (er != EMULATE_DONE)
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return 1;
} }
/* /*
......
...@@ -6307,8 +6307,10 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) ...@@ -6307,8 +6307,10 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
++vcpu->stat.insn_emulation_fail; ++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu); trace_kvm_emulate_insn_failed(vcpu);
if (emulation_type & EMULTYPE_NO_UD_ON_FAIL) if (emulation_type & EMULTYPE_VMWARE_GP) {
return EMULATE_FAIL; kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return EMULATE_DONE;
}
kvm_queue_exception(vcpu, UD_VECTOR); kvm_queue_exception(vcpu, UD_VECTOR);
...@@ -6648,9 +6650,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, ...@@ -6648,9 +6650,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
} }
} }
if ((emulation_type & EMULTYPE_VMWARE) && if ((emulation_type & EMULTYPE_VMWARE_GP) &&
!is_vmware_backdoor_opcode(ctxt)) !is_vmware_backdoor_opcode(ctxt)) {
return EMULATE_FAIL; kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return EMULATE_DONE;
}
if (emulation_type & EMULTYPE_SKIP) { if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip); kvm_rip_write(vcpu, ctxt->_eip);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment