Commit 11988499 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes

KVM allows userspace to violate consistency checks related to the
guest's CPUID model to some degree.  Generally speaking, userspace has
carte blanche when it comes to guest state so long as jamming invalid
state won't negatively affect the host.

Currently this seems to be a non-issue as most of the interesting
EFER checks are missing, e.g. NX and LME, but those will be added
shortly.  Proactively exempt host-initiated (userspace) writes from the
CPUID checks so as not to break userspace.

Note, the efer_reserved_bits check still applies to userspace writes as
that mask reflects the host's capabilities, e.g. KVM shouldn't allow a
guest to run with NX=1 if it has been disabled in the host.

Fixes: d8017474 ("KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c80add0f
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1258,31 +1258,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return 0;
 }
 
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	if (efer & efer_reserved_bits)
-		return false;
-
 	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
 		return false;
 
 	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
 		return false;
 
 	return true;
+}
+
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (efer & efer_reserved_bits)
+		return false;
+
+	return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
+	u64 efer = msr_info->data;
 
-	if (!kvm_valid_efer(vcpu, efer))
-		return 1;
+	if (efer & efer_reserved_bits)
+		return false;
 
-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+	if (!msr_info->host_initiated) {
+		if (!__kvm_valid_efer(vcpu, efer))
+			return 1;
+
+		if (is_paging(vcpu) &&
+		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+			return 1;
+	}
 
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
@@ -2452,7 +2463,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu->arch.arch_capabilities = data;
 		break;
 	case MSR_EFER:
-		return set_efer(vcpu, data);
+		return set_efer(vcpu, msr_info);
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
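
For context, a "host-initiated" write is one that reaches set_efer() through the KVM MSR ioctls (which set msr_info->host_initiated) rather than through guest WRMSR emulation.  Below is a minimal userspace sketch of such a write: KVM_SET_MSRS and the kvm_msrs/kvm_msr_entry structures are the standard KVM API from <linux/kvm.h>, while the vcpu_fd argument, the chosen EFER value, and the host_write_efer() helper name are assumptions made purely for illustration.

#include <linux/kvm.h>
#include <sys/ioctl.h>

#define MSR_EFER	0xc0000080	/* architectural MSR index of EFER */
#define EFER_SVME	(1ULL << 12)	/* example bit the guest CPUID may not advertise */

/*
 * Host-initiated EFER write.  KVM_SET_MSRS on a vCPU fd is handled with
 * msr_info->host_initiated == true, so after this commit the guest-CPUID
 * checks in __kvm_valid_efer() are skipped for it, while the
 * efer_reserved_bits (host capability) check still applies.
 */
static int host_write_efer(int vcpu_fd, __u64 efer)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_EFER,
		.entry.data  = efer,
	};

	/* Returns the number of MSRs successfully written: 1 on success. */
	return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}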