Commit 802145c5 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Rename vmx_uret_msr's "index" to "slot"

Rename "index" to "slot" in struct vmx_uret_msr to align with the
terminology used by common x86's kvm_user_return_msrs, and to avoid
conflating "MSR's ECX index" with "MSR's index into an array".

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-16-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 14a61b64
...@@ -616,7 +616,7 @@ static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) ...@@ -616,7 +616,7 @@ static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
int i; int i;
for (i = 0; i < vmx->nr_uret_msrs; ++i) for (i = 0; i < vmx->nr_uret_msrs; ++i)
if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].index] == msr) if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
return i; return i;
return -1; return -1;
} }
...@@ -640,7 +640,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, ...@@ -640,7 +640,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
msr->data = data; msr->data = data;
if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) { if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
preempt_disable(); preempt_disable();
ret = kvm_set_user_return_msr(msr->index, msr->data, msr->mask); ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
preempt_enable(); preempt_enable();
if (ret) if (ret)
msr->data = old_msr_data; msr->data = old_msr_data;
...@@ -1143,7 +1143,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) ...@@ -1143,7 +1143,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
if (!vmx->guest_uret_msrs_loaded) { if (!vmx->guest_uret_msrs_loaded) {
vmx->guest_uret_msrs_loaded = true; vmx->guest_uret_msrs_loaded = true;
for (i = 0; i < vmx->nr_active_uret_msrs; ++i) for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
kvm_set_user_return_msr(vmx->guest_uret_msrs[i].index, kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot,
vmx->guest_uret_msrs[i].data, vmx->guest_uret_msrs[i].data,
vmx->guest_uret_msrs[i].mask); vmx->guest_uret_msrs[i].mask);
...@@ -6730,7 +6730,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) ...@@ -6730,7 +6730,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (wrmsr_safe(index, data_low, data_high) < 0) if (wrmsr_safe(index, data_low, data_high) < 0)
continue; continue;
vmx->guest_uret_msrs[j].index = i; vmx->guest_uret_msrs[j].slot = i;
vmx->guest_uret_msrs[j].data = 0; vmx->guest_uret_msrs[j].data = 0;
switch (index) { switch (index) {
case MSR_IA32_TSX_CTRL: case MSR_IA32_TSX_CTRL:
......
...@@ -36,7 +36,7 @@ struct vmx_msrs { ...@@ -36,7 +36,7 @@ struct vmx_msrs {
}; };
struct vmx_uret_msr { struct vmx_uret_msr {
unsigned index; unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
u64 data; u64 data;
u64 mask; u64 mask;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment