Commit f8ae08f9 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Let userspace set nVMX MSR to any _host_ supported value

Restrict the nVMX MSRs based on KVM's config, not based on the guest's
current config.  Using the guest's config to audit the new config
prevents userspace from restoring the original config (KVM's config) if
at any point in the past the guest's config was restricted in any way.

Fixes: 62cc6b9d ("KVM: nVMX: support restore of VMX capability MSRs")
Cc: stable@vger.kernel.org
Cc: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220607213604.3346000-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a645c2b5
...@@ -1224,7 +1224,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
/* reserved */ /* reserved */
BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
u64 vmx_basic = vmx->nested.msrs.basic; u64 vmx_basic = vmcs_config.nested.basic;
if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
return -EINVAL; return -EINVAL;
...@@ -1247,36 +1247,42 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
return 0; return 0;
} }
static int static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) u32 **low, u32 **high)
{ {
u64 supported;
u32 *lowp, *highp;
switch (msr_index) { switch (msr_index) {
case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
lowp = &vmx->nested.msrs.pinbased_ctls_low; *low = &msrs->pinbased_ctls_low;
highp = &vmx->nested.msrs.pinbased_ctls_high; *high = &msrs->pinbased_ctls_high;
break; break;
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
lowp = &vmx->nested.msrs.procbased_ctls_low; *low = &msrs->procbased_ctls_low;
highp = &vmx->nested.msrs.procbased_ctls_high; *high = &msrs->procbased_ctls_high;
break; break;
case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_TRUE_EXIT_CTLS:
lowp = &vmx->nested.msrs.exit_ctls_low; *low = &msrs->exit_ctls_low;
highp = &vmx->nested.msrs.exit_ctls_high; *high = &msrs->exit_ctls_high;
break; break;
case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
lowp = &vmx->nested.msrs.entry_ctls_low; *low = &msrs->entry_ctls_low;
highp = &vmx->nested.msrs.entry_ctls_high; *high = &msrs->entry_ctls_high;
break; break;
case MSR_IA32_VMX_PROCBASED_CTLS2: case MSR_IA32_VMX_PROCBASED_CTLS2:
lowp = &vmx->nested.msrs.secondary_ctls_low; *low = &msrs->secondary_ctls_low;
highp = &vmx->nested.msrs.secondary_ctls_high; *high = &msrs->secondary_ctls_high;
break; break;
default: default:
BUG(); BUG();
} }
}
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
u32 *lowp, *highp;
u64 supported;
vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
supported = vmx_control_msr(*lowp, *highp); supported = vmx_control_msr(*lowp, *highp);
...@@ -1288,6 +1294,7 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
return -EINVAL; return -EINVAL;
vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
*lowp = data; *lowp = data;
*highp = data >> 32; *highp = data >> 32;
return 0; return 0;
...@@ -1301,10 +1308,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
/* reserved */ /* reserved */
GENMASK_ULL(13, 9) | BIT_ULL(31); GENMASK_ULL(13, 9) | BIT_ULL(31);
u64 vmx_misc; u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
vmcs_config.nested.misc_high);
vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
vmx->nested.msrs.misc_high);
if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
return -EINVAL; return -EINVAL;
...@@ -1332,10 +1337,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{ {
u64 vmx_ept_vpid_cap; u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
vmcs_config.nested.vpid_caps);
vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
vmx->nested.msrs.vpid_caps);
/* Every bit is either reserved or a feature bit. */ /* Every bit is either reserved or a feature bit. */
if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
...@@ -1346,20 +1349,21 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
return 0; return 0;
} }
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
{ {
u64 *msr;
switch (msr_index) { switch (msr_index) {
case MSR_IA32_VMX_CR0_FIXED0: case MSR_IA32_VMX_CR0_FIXED0:
msr = &vmx->nested.msrs.cr0_fixed0; return &msrs->cr0_fixed0;
break;
case MSR_IA32_VMX_CR4_FIXED0: case MSR_IA32_VMX_CR4_FIXED0:
msr = &vmx->nested.msrs.cr4_fixed0; return &msrs->cr4_fixed0;
break;
default: default:
BUG(); BUG();
} }
}
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
/* /*
* 1 bits (which indicates bits which "must-be-1" during VMX operation) * 1 bits (which indicates bits which "must-be-1" during VMX operation)
...@@ -1368,7 +1372,7 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
if (!is_bitwise_subset(data, *msr, -1ULL)) if (!is_bitwise_subset(data, *msr, -1ULL))
return -EINVAL; return -EINVAL;
*msr = data; *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
return 0; return 0;
} }
...@@ -1429,7 +1433,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vmx->nested.msrs.vmcs_enum = data; vmx->nested.msrs.vmcs_enum = data;
return 0; return 0;
case MSR_IA32_VMX_VMFUNC: case MSR_IA32_VMX_VMFUNC:
if (data & ~vmx->nested.msrs.vmfunc_controls) if (data & ~vmcs_config.nested.vmfunc_controls)
return -EINVAL; return -EINVAL;
vmx->nested.msrs.vmfunc_controls = data; vmx->nested.msrs.vmfunc_controls = data;
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment