Commit c2fe3cd4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move vendor CR4 validity check to dedicated kvm_x86_ops hook

Split out VMX's checks on CR4.VMXE to a dedicated hook, .is_valid_cr4(),
and invoke the new hook from kvm_valid_cr4().  This fixes an issue where
KVM_SET_SREGS would return success while failing to actually set CR4.
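
Roughly, the resulting common-code flow is the following; this is a
condensed sketch of the arch/x86/kvm/x86.c hunks below, with the unrelated
reserved-bit, paging-transition and MMU/TLB handling elided, not the
literal code:

  int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  {
          /* Validity check now includes the vendor ->is_valid_cr4() hook. */
          if (kvm_valid_cr4(vcpu, cr4))
                  return 1;

          /* ... PAE/PCID/LA57 transition checks elided ... */

          /* The vendor setter is now void and can no longer fail. */
          kvm_x86_ops.set_cr4(vcpu, cr4);

          /* ... MMU role update and TLB flush handling elided ... */
          return 0;
  }

A bad CR4 value is thus rejected before any vendor or common state is
touched.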

Fixing the issue by explicitly checking kvm_x86_ops.set_cr4()'s return
in __set_sregs() is not a viable option as KVM has already stuffed a
variety of vCPU state.
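
For illustration, a rough and simplified sketch of __set_sregs(); the
kvm_valid_sregs() call and the exact ordering of the stores are
illustrative here, not a verbatim quote:

  static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
  {
          /* Rejects a bad CR4 up front via kvm_valid_cr4() -> ->is_valid_cr4(). */
          if (kvm_valid_sregs(vcpu, sregs))
                  return -EINVAL;

          /* From here on, vCPU state is already being committed: */
          /* ... IDT/GDT, CR2/CR3, CR8 loaded ... */
          kvm_x86_ops.set_efer(vcpu, sregs->efer);
          kvm_x86_ops.set_cr0(vcpu, sregs->cr0);
          kvm_x86_ops.set_cr4(vcpu, sregs->cr4);  /* too late to fail cleanly */
          /* ... segments, APIC base, pending interrupt state, etc. ... */
          return 0;
  }

Failing at the set_cr4() step would leave the vCPU with the new EFER and
CR0 but the old CR4, with no sane way for KVM or userspace to unwind.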

Note, kvm_valid_cr4() and is_valid_cr4() have different return types and
inverted semantics.  This will be remedied in a future patch.
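
Concretely, kvm_valid_cr4() keeps the 0/-EINVAL convention while the new
hook returns a bool that is true for a valid value, so the common code
bridges the two with a negation (as in the x86.c hunk below):

          if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4))
                  return -EINVAL;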

Fixes: 5e1746d6 ("KVM: nVMX: Allow setting the VMXE bit in CR4")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20201007014417.29276-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 311a0659
arch/x86/include/asm/kvm_host.h
@@ -1115,7 +1115,8 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
-	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
+	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
arch/x86/kvm/svm/svm.c
@@ -1682,7 +1682,12 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	update_cr0_intercept(svm);
 }
 
-int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return true;
+}
+
+void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
@@ -1696,7 +1701,6 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
 	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
-	return 0;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -4212,6 +4216,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.set_cr0 = svm_set_cr0,
+	.is_valid_cr4 = svm_is_valid_cr4,
 	.set_cr4 = svm_set_cr4,
 	.set_efer = svm_set_efer,
 	.get_idt = svm_get_idt,
arch/x86/kvm/svm/svm.h
@@ -358,7 +358,7 @@ void svm_vcpu_free_msrpm(u32 *msrpm);
 
 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void svm_flush_tlb(struct kvm_vcpu *vcpu);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
arch/x86/kvm/vmx/nested.c
@@ -4814,7 +4814,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	/*
 	 * The Intel VMX Instruction Reference lists a bunch of bits that are
 	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
-	 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
+	 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
 	 * Otherwise, we should fail with #UD.  But most faulting conditions
 	 * have already been checked by hardware, prior to the VM-exit for
 	 * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
arch/x86/kvm/vmx/vmx.c
@@ -3095,7 +3095,23 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
 		vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
-int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	/*
+	 * We operate under the default treatment of SMM, so VMX cannot be
+	 * enabled under SMM.  Note, whether or not VMXE is allowed at all is
+	 * handled by kvm_valid_cr4().
+	 */
+	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
+		return false;
+
+	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
+		return false;
+
+	return true;
+}
+
+void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	/*
@@ -3123,17 +3139,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		}
 	}
 
-	/*
-	 * We operate under the default treatment of SMM, so VMX cannot be
-	 * enabled under SMM.  Note, whether or not VMXE is allowed at all is
-	 * handled by kvm_valid_cr4().
-	 */
-	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
-		return 1;
-
-	if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
-		return 1;
-
 	vcpu->arch.cr4 = cr4;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
 
@@ -3164,7 +3169,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, hw_cr4);
-	return 0;
 }
 
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
@@ -7612,6 +7616,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.set_cr0 = vmx_set_cr0,
+	.is_valid_cr4 = vmx_is_valid_cr4,
 	.set_cr4 = vmx_set_cr4,
 	.set_efer = vmx_set_efer,
 	.get_idt = vmx_get_idt,
arch/x86/kvm/vmx/vmx.h
@@ -321,7 +321,7 @@ u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
 void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
arch/x86/kvm/x86.c
@@ -972,6 +972,9 @@ int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
 		return -EINVAL;
 
+	if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4))
+		return -EINVAL;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_valid_cr4);
@@ -1006,8 +1009,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 	}
 
-	if (kvm_x86_ops.set_cr4(vcpu, cr4))
-		return 1;
+	kvm_x86_ops.set_cr4(vcpu, cr4);
 
 	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))