Commit 727ba748 authored by Felix Wilhelm, committed by Paolo Bonzini

kvm: nVMX: Enforce cpl=0 for VMX instructions

VMX instructions executed inside an L1 VM will always trigger a VM exit,
even when executed at CPL 3. This means we must perform the
privilege check in software.

Fixes: 70f3aac9 ("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f4160e45
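
For context: because these instructions VM-exit to L0 regardless of the guest's CPL, an unprivileged (ring-3) process inside an L1 guest could previously reach KVM's nested-VMX emulation paths. The sketch below is purely illustrative and not part of the commit; all names in it are made up for the example. Run inside an L1 guest with nested VMX enabled, a patched kernel (like bare-metal hardware) rejects VMXON at CPL 3, so the process simply takes a signal.

/* Illustrative only -- not part of this commit. On a fixed kernel (or on
 * bare metal) VMXON at CPL 3 raises #UD/#GP, which the guest kernel
 * delivers to this process as SIGILL/SIGSEGV. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void trap(int sig)
{
	printf("vmxon from CPL 3 was rejected (signal %d) -- check in place\n", sig);
	exit(0);
}

int main(void)
{
	unsigned long long vmxon_region_pa = 0;	/* dummy operand, never a valid region */

	signal(SIGILL, trap);
	signal(SIGSEGV, trap);

	/* VMXON takes a memory operand. On bare metal the CPU itself faults
	 * at CPL 3; inside an L1 guest the instruction VM-exits to L0 first,
	 * so KVM must re-check the CPL in software, as this commit does. */
	__asm__ volatile("vmxon %0" : : "m"(vmxon_region_pa));

	printf("vmxon did not fault at CPL 3 -- CPL check is missing\n");
	return 0;
}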
@@ -7905,6 +7905,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
+	/* CPL=0 must be checked manually. */
+	if (vmx_get_cpl(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
 	if (vmx->nested.vmxon) {
 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 		return kvm_skip_emulated_instruction(vcpu);
@@ -7964,6 +7970,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
  */
 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 {
+	if (vmx_get_cpl(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 0;
+	}
+
 	if (!to_vmx(vcpu)->nested.vmxon) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 0;
@@ -8283,7 +8294,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		if (get_vmx_mem_address(vcpu, exit_qualification,
 				vmx_instruction_info, true, &gva))
 			return 1;
-		/* _system ok, as hardware has verified cpl=0 */
+		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
 			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
 	}
@@ -8448,7 +8459,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, exit_qualification,
 			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
-	/* ok to use *_system, as hardware has verified cpl=0 */
+	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
 				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
 				 sizeof(u64), &e)) {