Commit 33966dd6 authored by Konrad Rzeszutek Wilk, committed by Thomas Gleixner

x86/KVM/VMX: Split the VMX MSR LOAD structures to have host/guest numbers

There is no semantic change, but this change allows an unbalanced number of
MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
restore on VMEXIT or VMENTER may differ.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c595ceee
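
The practical effect of the split is that the VM-entry (guest) and VM-exit (host) autoload lists now carry independent counts, so a later change can add an MSR to only one of the two lists. The sketch below is a minimal, stand-alone user-space illustration of that idea, not kernel code: the msrs_add() helper, the MSR indices, and the NR_AUTOLOAD_MSRS value are assumptions made for the example.

/* Illustrative sketch only: split autoload lists with independent counts. */
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 4                      /* capacity assumed for this sketch */

struct vmx_msr_entry {                          /* MSR index/value pair */
        unsigned int index;
        unsigned long long value;
};

struct vmx_msrs {                               /* one autoload list with its own count */
        unsigned int nr;
        struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct msr_autoload {                           /* guest = VM-entry list, host = VM-exit list */
        struct vmx_msrs guest;
        struct vmx_msrs host;
};

/* Hypothetical helper: append an entry to one list only. */
static void msrs_add(struct vmx_msrs *m, unsigned int msr, unsigned long long value)
{
        if (m->nr < NR_AUTOLOAD_MSRS) {
                m->val[m->nr].index = msr;
                m->val[m->nr].value = value;
                m->nr++;
        }
}

int main(void)
{
        struct msr_autoload a = { .guest = { .nr = 0 }, .host = { .nr = 0 } };

        /* Balanced entry: switch the same (made-up) MSR on both VM-entry and VM-exit. */
        msrs_add(&a.guest, 0x10, 1);
        msrs_add(&a.host,  0x10, 0);

        /* Unbalanced entry: load another MSR on VM-entry only. */
        msrs_add(&a.guest, 0x20, 2);

        printf("guest.nr=%u host.nr=%u\n", a.guest.nr, a.host.nr);     /* prints 2 and 1 */
        return 0;
}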
@@ -808,6 +808,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
                         (unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+        unsigned int            nr;
+        struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
         struct kvm_vcpu       vcpu;
         unsigned long         host_rsp;
@@ -841,9 +846,8 @@ struct vcpu_vmx {
         struct loaded_vmcs   *loaded_vmcs;
         bool                  __launched; /* temporary, used in vmx_vcpu_run */
         struct msr_autoload {
-                unsigned nr;
-                struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-                struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+                struct vmx_msrs guest;
+                struct vmx_msrs host;
         } msr_autoload;
         struct {
                 int           loaded;
@@ -2440,18 +2444,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
                 }
                 break;
         }
-
-        for (i = 0; i < m->nr; ++i)
-                if (m->guest[i].index == msr)
+        for (i = 0; i < m->guest.nr; ++i)
+                if (m->guest.val[i].index == msr)
                         break;
 
-        if (i == m->nr)
+        if (i == m->guest.nr)
                 return;
-        --m->nr;
-        m->guest[i] = m->guest[m->nr];
-        m->host[i] = m->host[m->nr];
-        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+        --m->guest.nr;
+        --m->host.nr;
+        m->guest.val[i] = m->guest.val[m->guest.nr];
+        m->host.val[i] = m->host.val[m->host.nr];
+        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2503,24 +2507,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
         }
 
-        for (i = 0; i < m->nr; ++i)
-                if (m->guest[i].index == msr)
+        for (i = 0; i < m->guest.nr; ++i)
+                if (m->guest.val[i].index == msr)
                         break;
 
         if (i == NR_AUTOLOAD_MSRS) {
                 printk_once(KERN_WARNING "Not enough msr switch entries. "
                                 "Can't add msr %x\n", msr);
                 return;
-        } else if (i == m->nr) {
-                ++m->nr;
-                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+        } else if (i == m->guest.nr) {
+                ++m->guest.nr;
+                ++m->host.nr;
+                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
         }
 
-        m->guest[i].index = msr;
-        m->guest[i].value = guest_val;
-        m->host[i].index = msr;
-        m->host[i].value = host_val;
+        m->guest.val[i].index = msr;
+        m->guest.val[i].value = guest_val;
+        m->host.val[i].index = msr;
+        m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
@@ -6290,9 +6295,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-        vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+        vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-        vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+        vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -11350,10 +11355,10 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
          * Set the MSR load/store lists to match L0's settings.
          */
         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
-        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-        vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-        vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+        vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+        vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
         set_cr4_guest_host_mask(vmx);
@@ -12457,8 +12462,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         vmx_segment_cache_clear(vmx);
 
         /* Update any VMCS fields that might have changed while L2 ran */
-        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
         if (vmx->hv_deadline_tsc == -1)
                 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,