Commit 70f3aac9 authored by Jim Mattson, committed by Paolo Bonzini

kvm: nVMX: Remove superfluous VMX instruction fault checks

According to the Intel SDM, "Certain exceptions have priority over VM
exits. These include invalid-opcode exceptions, faults based on
privilege level*, and general-protection exceptions that are based on
checking I/O permission bits in the task-state segment (TSS)."

There is no need to check for faulting conditions that the hardware
has already checked.

* These include faults generated by attempts to execute, in
  virtual-8086 mode, privileged instructions that are not recognized
  in that mode.
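
As a rough illustration (not KVM code), the priority ordering described above can be sketched as follows; the names vmx_insn_outcome and vmxon_outcome are invented for this example and do not exist in KVM. Conditions that fault with priority over the VM exit are delivered straight to the guest and never reach the L0 exit handler, which is why re-checking them there is superfluous; the guest's view of cr4.VMXE is the one precondition hardware cannot enforce on L0's behalf, because the physical CR4.VMXE bit is always 1 in VMX non-root operation.

    #include <stdbool.h>

    /*
     * Illustrative only -- not KVM code.  Models which VMXON faulting
     * conditions the CPU resolves before the VM exit (delivering the
     * fault directly to the guest) versus what is left for the L0
     * VM-exit handler to emulate.
     */
    enum vmx_insn_outcome {
            GUEST_UD,       /* #UD raised by hardware, no VM exit */
            GUEST_GP,       /* #GP(0) raised by hardware, no VM exit */
            VM_EXIT_TO_L0,  /* VM exit reaches the L0 handler */
    };

    static enum vmx_insn_outcome vmxon_outcome(bool real_or_vm86_mode,
                                               bool long_mode_with_non_64bit_cs,
                                               bool cpl_nonzero)
    {
            /* Invalid-opcode conditions have priority over the VM exit. */
            if (real_or_vm86_mode || long_mode_with_non_64bit_cs)
                    return GUEST_UD;

            /* So do faults based on privilege level. */
            if (cpl_nonzero)
                    return GUEST_GP;

            /*
             * The guest's shadowed cr4.VMXE is invisible to hardware (the
             * physical bit is always 1 in non-root mode), so only the L0
             * handler can check it and inject #UD if it is clear.
             */
            return VM_EXIT_TO_L0;
    }
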
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6ed071f0
@@ -7052,34 +7052,24 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
         int ret;
-        struct kvm_segment cs;
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
-        /* The Intel VMX Instruction Reference lists a bunch of bits that
-         * are prerequisite to running VMXON, most notably cr4.VMXE must be
-         * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
-         * Otherwise, we should fail with #UD. We test these now:
+        /*
+         * The Intel VMX Instruction Reference lists a bunch of bits that are
+         * prerequisite to running VMXON, most notably cr4.VMXE must be set to
+         * 1 (see vmx_set_cr4() for when we allow the guest to set this).
+         * Otherwise, we should fail with #UD.  But most faulting conditions
+         * have already been checked by hardware, prior to the VM-exit for
+         * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
+         * that bit set to 1 in non-root mode.
          */
-        if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
-            !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
-            (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
-                kvm_queue_exception(vcpu, UD_VECTOR);
-                return 1;
-        }
-
-        vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
-        if (is_long_mode(vcpu) && !cs.l) {
+        if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
                 kvm_queue_exception(vcpu, UD_VECTOR);
                 return 1;
         }
 
-        if (vmx_get_cpl(vcpu)) {
-                kvm_inject_gp(vcpu, 0);
-                return 1;
-        }
-
         if (vmx->nested.vmxon) {
                 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
                 return kvm_skip_emulated_instruction(vcpu);
@@ -7106,29 +7096,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
  * Intel's VMX Instruction Reference specifies a common set of prerequisites
  * for running VMX instructions (except VMXON, whose prerequisites are
  * slightly different). It also specifies what exception to inject otherwise.
+ * Note that many of these exceptions have priority over VM exits, so they
+ * don't have to be checked again here.
  */
 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 {
-        struct kvm_segment cs;
-        struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-        if (!vmx->nested.vmxon) {
+        if (!to_vmx(vcpu)->nested.vmxon) {
                 kvm_queue_exception(vcpu, UD_VECTOR);
                 return 0;
         }
-
-        vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
-        if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
-            (is_long_mode(vcpu) && !cs.l)) {
-                kvm_queue_exception(vcpu, UD_VECTOR);
-                return 0;
-        }
-
-        if (vmx_get_cpl(vcpu)) {
-                kvm_inject_gp(vcpu, 0);
-                return 0;
-        }
-
         return 1;
 }
@@ -7472,7 +7448,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                 if (get_vmx_mem_address(vcpu, exit_qualification,
                                 vmx_instruction_info, true, &gva))
                         return 1;
-                /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+                /* _system ok, as hardware has verified cpl=0 */
                 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
                              &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
         }
@@ -7605,7 +7581,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
         if (get_vmx_mem_address(vcpu, exit_qualification,
                         vmx_instruction_info, true, &vmcs_gva))
                 return 1;
-        /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+        /* ok to use *_system, as hardware has verified cpl=0 */
         if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
                                  (void *)&to_vmx(vcpu)->nested.current_vmptr,
                                  sizeof(u64), &e)) {
@@ -7638,11 +7614,6 @@ static int handle_invept(struct kvm_vcpu *vcpu)
         if (!nested_vmx_check_permission(vcpu))
                 return 1;
 
-        if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
-                kvm_queue_exception(vcpu, UD_VECTOR);
-                return 1;
-        }
-
         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);