Commit 4be53410 authored by Xiaoyao Li, committed by Paolo Bonzini

KVM: VMX: Initialize vmx->guest_msrs[] right after allocation

Move the initialization of vmx->guest_msrs[] from vmx_vcpu_setup() to
vmx_create_vcpu(), and put it right after its allocation.

This is also preparation for the next patch.
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3c0f4be1
@@ -4166,8 +4166,6 @@ static void ept_set_mmio_spte_mask(void)
  */
 static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
-	int i;
-
 	if (nested)
 		nested_vmx_vcpu_setup();
@@ -4226,21 +4224,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
 
-	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
-		u32 index = vmx_msr_index[i];
-		u32 data_low, data_high;
-		int j = vmx->nmsrs;
-
-		if (rdmsr_safe(index, &data_low, &data_high) < 0)
-			continue;
-		if (wrmsr_safe(index, data_low, data_high) < 0)
-			continue;
-		vmx->guest_msrs[j].index = i;
-		vmx->guest_msrs[j].data = 0;
-		vmx->guest_msrs[j].mask = -1ull;
-		++vmx->nmsrs;
-	}
-
 	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
 
 	/* 22.2.1, 20.8.1 */
@@ -6700,7 +6683,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	int err;
 	struct vcpu_vmx *vmx;
 	unsigned long *msr_bitmap;
-	int cpu;
+	int i, cpu;
 
 	BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
 		"struct kvm_vcpu must be at offset 0 for arch usercopy region");
@@ -6752,6 +6735,21 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx->guest_msrs)
 		goto free_pml;
 
+	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
+		u32 index = vmx_msr_index[i];
+		u32 data_low, data_high;
+		int j = vmx->nmsrs;
+
+		if (rdmsr_safe(index, &data_low, &data_high) < 0)
+			continue;
+		if (wrmsr_safe(index, data_low, data_high) < 0)
+			continue;
+		vmx->guest_msrs[j].index = i;
+		vmx->guest_msrs[j].data = 0;
+		vmx->guest_msrs[j].mask = -1ull;
+		++vmx->nmsrs;
+	}
+
 	err = alloc_loaded_vmcs(&vmx->vmcs01);
 	if (err < 0)
 		goto free_msrs;
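
For context, the relocated loop walks vmx_msr_index[], probes each candidate with rdmsr_safe()/wrmsr_safe(), and appends only host-supported MSRs to vmx->guest_msrs[], each with a zeroed value and an all-ones switch mask. The standalone C program below is a minimal userspace sketch of that probe-and-append pattern; probe_msr(), candidate_msrs[], and struct msr_entry are hypothetical stand-ins for the real KVM helpers and tables, not code from this patch.

/*
 * Minimal userspace sketch of the probe-and-append pattern used for
 * vmx->guest_msrs[].  probe_msr(), candidate_msrs[] and struct msr_entry
 * are hypothetical stand-ins; in KVM the probe is rdmsr_safe()/wrmsr_safe()
 * against real MSR indices.
 */
#include <stdint.h>
#include <stdio.h>

struct msr_entry {
	uint32_t index;	/* slot in the candidate table, not the MSR number */
	uint64_t data;	/* guest value, starts at 0 */
	uint64_t mask;	/* which bits are switched on entry/exit */
};

/* Hypothetical candidate table standing in for vmx_msr_index[]. */
static const uint32_t candidate_msrs[] = { 0xc0000080, 0xc0000081, 0x1fff };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for the rdmsr_safe()/wrmsr_safe() probe: pretend the last MSR
 * is not implemented on this host. */
static int probe_msr(uint32_t msr)
{
	return msr == 0x1fff ? -1 : 0;
}

int main(void)
{
	struct msr_entry guest_msrs[ARRAY_SIZE(candidate_msrs)];
	unsigned int nmsrs = 0;

	for (unsigned int i = 0; i < ARRAY_SIZE(candidate_msrs); i++) {
		/* Skip MSRs the host does not implement, as the real loop does. */
		if (probe_msr(candidate_msrs[i]) < 0)
			continue;

		guest_msrs[nmsrs].index = i;
		guest_msrs[nmsrs].data = 0;
		guest_msrs[nmsrs].mask = -1ull;	/* switch every bit by default */
		nmsrs++;
	}

	printf("tracking %u of %zu candidate MSRs\n", nmsrs,
	       ARRAY_SIZE(candidate_msrs));
	return 0;
}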