Commit 62ef68bb authored by Paolo Bonzini

KVM: x86: introduce num_emulated_msrs

We will want to filter away MSR_IA32_SMBASE from the emulated_msrs if
the host CPU does not support SMM virtualization.  Introduce the
logic to do that, and also move paravirt MSRs to emulated_msrs for
simplicity and to get rid of KVM_SAVE_MSRS_BEGIN.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e69fab5d
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -925,17 +925,11 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  *
  * This list is modified at module load time to reflect the
  * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in the beginning of the list.
+ * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * may depend on host virtualization features rather than host cpu features.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	12
 static u32 msrs_to_save[] = {
-	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
-	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
-	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
-	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
-	MSR_KVM_PV_EOI_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -947,7 +941,14 @@ static u32 msrs_to_save[] = {
 
 static unsigned num_msrs_to_save;
 
-static const u32 emulated_msrs[] = {
+static u32 emulated_msrs[] = {
+	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+	MSR_KVM_PV_EOI_EN,
+
 	MSR_IA32_TSC_ADJUST,
 	MSR_IA32_TSCDEADLINE,
 	MSR_IA32_MISC_ENABLE,
@@ -955,6 +956,8 @@ static const u32 emulated_msrs[] = {
 	MSR_IA32_MCG_CTL,
 };
 
+static unsigned num_emulated_msrs;
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits)
@@ -2928,7 +2931,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
 			goto out;
 		n = msr_list.nmsrs;
-		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
+		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
 			goto out;
 		r = -E2BIG;
@@ -2940,7 +2943,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			goto out;
 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
 				 &emulated_msrs,
-				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
+				 num_emulated_msrs * sizeof(u32)))
 			goto out;
 		r = 0;
 		break;
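
The two hunks above change what KVM_GET_MSR_INDEX_LIST reports: the count now uses the run-time num_emulated_msrs rather than the compile-time ARRAY_SIZE(emulated_msrs), so MSRs filtered out at module load are never exposed to userspace. For illustration, a minimal userspace sketch of the ioctl's probe-then-fetch pattern (not part of the patch; error handling abbreviated):

/* Probe with nmsrs = 0: KVM writes back the real count
 * (num_msrs_to_save + num_emulated_msrs) and fails with E2BIG;
 * a second call with a large enough buffer then succeeds. */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	struct kvm_msr_list probe = { .nmsrs = 0 };
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);	/* expected: E2BIG */

	struct kvm_msr_list *list =
		malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0) {
		perror("KVM_GET_MSR_INDEX_LIST");
		return 1;
	}

	printf("host reports %u save/restore MSRs\n", list->nmsrs);
	for (__u32 i = 0; i < list->nmsrs; i++)
		printf("  0x%08x\n", list->indices[i]);
	free(list);
	return 0;
}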
@@ -4206,8 +4209,7 @@ static void kvm_init_msr_list(void)
 	u32 dummy[2];
 	unsigned i, j;
 
-	/* skip the first msrs in the list. KVM-specific */
-	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
+	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
 			continue;
 
@@ -4232,6 +4234,18 @@ static void kvm_init_msr_list(void)
 		j++;
 	}
 	num_msrs_to_save = j;
+
+	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
+		switch (emulated_msrs[i]) {
+		default:
+			break;
+		}
+
+		if (j < i)
+			emulated_msrs[j] = emulated_msrs[i];
+		j++;
+	}
+	num_emulated_msrs = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
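
The new loop at the end of kvm_init_msr_list() compacts emulated_msrs in place: any entry the switch skips with continue is dropped before j advances, and survivors slide toward the front. In this commit the switch is deliberately empty; it is the hook where, per the commit message, a follow-up can filter MSR_IA32_SMBASE on hosts without SMM virtualization. A standalone sketch of the same pattern (the MSR_IA32_SMBASE case, host_has_smm(), and init_emulated_msr_list() are illustrative assumptions, not code from this commit; the MSR constants match the kernel's values):

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(x)		(sizeof(x) / sizeof((x)[0]))

#define MSR_IA32_SMBASE		0x0000009e
#define MSR_IA32_TSCDEADLINE	0x000006e0
#define MSR_IA32_MISC_ENABLE	0x000001a0

static unsigned int emulated_msrs[] = {
	MSR_IA32_SMBASE, MSR_IA32_TSCDEADLINE, MSR_IA32_MISC_ENABLE,
};
static unsigned int num_emulated_msrs;

/* Hypothetical capability check; in KVM this would consult the
 * vendor module (VMX/SVM), not return a hardcoded value. */
static bool host_has_smm(void)
{
	return false;
}

static void init_emulated_msr_list(void)
{
	unsigned int i, j;

	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
		switch (emulated_msrs[i]) {
		case MSR_IA32_SMBASE:	/* hypothetical follow-up filter */
			if (!host_has_smm())
				continue;	/* skip entry: j not advanced */
			break;
		default:
			break;
		}

		/* Keep this MSR: compact it toward the front in place. */
		if (j < i)
			emulated_msrs[j] = emulated_msrs[i];
		j++;
	}
	num_emulated_msrs = j;
}

int main(void)
{
	init_emulated_msr_list();
	printf("%u of %zu MSRs survive filtering\n",
	       num_emulated_msrs, ARRAY_SIZE(emulated_msrs));
	return 0;
}

Because the filtering happens once at module load, the ioctl path above can simply trust num_emulated_msrs instead of re-checking capabilities on every call.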