Commit 1389309c authored by Paolo Bonzini, committed by Radim Krčmář

KVM: nVMX: expose VMX capabilities for nested hypervisors to userspace

Use the new MSR feature framework to tell userspace which VMX capabilities
are available for nested hypervisors.  Before, these were only accessible
with the KVM_GET_MSR VCPU ioctl, after VCPUs had been created.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 6677f3da
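
For context, a minimal userspace sketch (illustrative, not part of the patch) of the access path this enables: with the MSR feature framework, the VMX capability MSRs can be read via KVM_GET_MSRS on the /dev/kvm system file descriptor before any VM or VCPU exists, rather than only through the per-VCPU KVM_GET_MSR path mentioned above. The MSR index 0x480 (IA32_VMX_BASIC) is spelled out locally because the KVM uapi header does not export it; error handling is kept minimal.

/* Hedged sketch, not from this patch: read IA32_VMX_BASIC through the
 * MSR feature framework on the /dev/kvm system fd (no VM/VCPU needed). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_VMX_BASIC 0x480	/* first of the VMX capability MSRs */

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* struct kvm_msrs is a header followed by a flexible array of entries */
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req;
	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = MSR_IA32_VMX_BASIC;

	/* the system-scope KVM_GET_MSRS returns the number of MSRs read */
	if (ioctl(kvm, KVM_GET_MSRS, &req) != 1) {
		perror("KVM_GET_MSRS");
		return 1;
	}
	printf("IA32_VMX_BASIC = 0x%llx\n", (unsigned long long)req.entry.data);
	return 0;
}

On a kernel without the MSR feature framework the ioctl fails, so checking KVM_CHECK_EXTENSION for KVM_CAP_GET_MSR_FEATURES first is the safer pattern.
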
@@ -958,6 +958,7 @@ static struct vmcs_config {
 	u32 cpu_based_2nd_exec_ctrl;
 	u32 vmexit_ctrl;
 	u32 vmentry_ctrl;
+	struct nested_vmx_msrs nested;
 } vmcs_config;
 
 static struct vmx_capability {
@@ -2689,6 +2690,11 @@ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
  */
 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 {
+	if (!nested) {
+		memset(msrs, 0, sizeof(*msrs));
+		return;
+	}
+
 	/*
 	 * Note that as a general rule, the high half of the MSRs (bits in
 	 * the control fields which may be 1) should be initialized by the
@@ -2713,13 +2719,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 	msrs->pinbased_ctls_high &=
 		PIN_BASED_EXT_INTR_MASK |
 		PIN_BASED_NMI_EXITING |
-		PIN_BASED_VIRTUAL_NMIS;
+		PIN_BASED_VIRTUAL_NMIS |
+		(apicv ? PIN_BASED_POSTED_INTR : 0);
 	msrs->pinbased_ctls_high |=
 		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
 		PIN_BASED_VMX_PREEMPTION_TIMER;
-	if (apicv)
-		msrs->pinbased_ctls_high |=
-			PIN_BASED_POSTED_INTR;
 
 	/* exit controls */
 	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
@@ -3231,7 +3235,16 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
 
 static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
 {
-	return 1;
+	switch (msr->index) {
+	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+		if (!nested)
+			return 1;
+		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+	default:
+		return 1;
+	}
+
+	return 0;
 }
 
 /*
@@ -3697,6 +3710,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	u32 _vmexit_control = 0;
 	u32 _vmentry_control = 0;
 
+	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
 	min = CPU_BASED_HLT_EXITING |
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
@@ -7091,6 +7105,7 @@ static __init int hardware_setup(void)
 		init_vmcs_shadow_fields();
 
 	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
+	nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv);
 
 	kvm_mce_cap_supported |= MCG_LMCE_P;
@@ -9822,6 +9837,7 @@ static void __init vmx_check_processor_compat(void *rtn)
 	*(int *)rtn = 0;
 	if (setup_vmcs_config(&vmcs_conf) < 0)
 		*(int *)rtn = -EIO;
+	nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, enable_apicv);
 	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
 		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
 				smp_processor_id());
...
@@ -1056,6 +1056,25 @@ static unsigned num_emulated_msrs;
  * can be used by a hypervisor to validate requested CPU features.
  */
 static u32 msr_based_features[] = {
+	MSR_IA32_VMX_BASIC,
+	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+	MSR_IA32_VMX_PINBASED_CTLS,
+	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+	MSR_IA32_VMX_PROCBASED_CTLS,
+	MSR_IA32_VMX_TRUE_EXIT_CTLS,
+	MSR_IA32_VMX_EXIT_CTLS,
+	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+	MSR_IA32_VMX_ENTRY_CTLS,
+	MSR_IA32_VMX_MISC,
+	MSR_IA32_VMX_CR0_FIXED0,
+	MSR_IA32_VMX_CR0_FIXED1,
+	MSR_IA32_VMX_CR4_FIXED0,
+	MSR_IA32_VMX_CR4_FIXED1,
+	MSR_IA32_VMX_VMCS_ENUM,
+	MSR_IA32_VMX_PROCBASED_CTLS2,
+	MSR_IA32_VMX_EPT_VPID_CAP,
+	MSR_IA32_VMX_VMFUNC,
+
 	MSR_F10H_DECFG,
 	MSR_IA32_UCODE_REV,
 };
...
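
As a companion sketch (again illustrative, not from the patch): the indices appended to msr_based_features above are what the KVM_GET_MSR_FEATURE_INDEX_LIST system ioctl reports back, so userspace can enumerate the available feature MSRs before reading individual values with KVM_GET_MSRS as shown earlier. The fixed capacity of 64 entries is an assumption for brevity; a too-small buffer makes the ioctl fail with E2BIG and nmsrs set to the required count.

/* Hedged sketch: list the feature MSRs, including the VMX capability MSRs
 * just added to msr_based_features, via KVM_GET_MSR_FEATURE_INDEX_LIST. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* kvm_msr_list is a count followed by a flexible array of indices */
	struct {
		struct kvm_msr_list list;
		__u32 indices[64];	/* assumed to be large enough */
	} buf;
	memset(&buf, 0, sizeof(buf));
	buf.list.nmsrs = 64;

	/* the kernel rewrites nmsrs with the real count and fills indices */
	if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &buf) < 0) {
		perror("KVM_GET_MSR_FEATURE_INDEX_LIST");
		return 1;
	}
	for (unsigned int i = 0; i < buf.list.nmsrs; i++)
		printf("feature MSR 0x%x\n", buf.indices[i]);
	return 0;
}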