Commit a128a934 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Rename "vmx_find_msr_index" to "vmx_find_loadstore_msr_slot"

Add "loadstore" to vmx_find_msr_index() to differentiate it from the so
called shared MSRs helpers (which will soon be renamed), and replace
"index" with "slot" to better convey that the helper returns slot in the
array, not the MSR index (the value that gets stuffed into ECX).

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-4-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ce833b23
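
For context before the diff: below is a minimal, self-contained user-space sketch (not part of this patch) of the renamed helper and the array it walks. The struct layout and helper body mirror the kernel's, but the includes, defines, and main() are mock scaffolding added here for illustration. It shows why "slot" is the better name: the return value indexes val[], while val[slot].index holds the MSR number that gets stuffed into ECX for RDMSR/WRMSR.

/*
 * User-space mock, NOT the kernel code itself. MAX_NR_LOADSTORE_MSRS
 * is assumed to be 8, matching the kernel define at this point in the
 * series; MSR_IA32_TSC is the architectural MSR number 0x10.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NR_LOADSTORE_MSRS	8
#define MSR_IA32_TSC		0x10

struct vmx_msr_entry {
	uint32_t index;		/* the MSR number, i.e. the ECX value */
	uint32_t reserved;
	uint64_t value;
};

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

/* Returns a slot in val[], not an MSR number; -ENOENT if absent. */
static int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, uint32_t msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; ++i)
		if (m->val[i].index == msr)
			return i;
	return -ENOENT;
}

int main(void)
{
	struct vmx_msrs m = {
		.nr = 1,
		.val[0] = { .index = MSR_IA32_TSC, .value = 12345 },
	};
	int slot = vmx_find_loadstore_msr_slot(&m, MSR_IA32_TSC);

	/* slot is 0 (array position); the MSR index is 0x10 (ECX value). */
	if (slot >= 0)
		printf("slot=%d msr=0x%x value=%llu\n", slot,
		       (unsigned)m.val[slot].index,
		       (unsigned long long)m.val[slot].value);
	return 0;
}
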
arch/x86/kvm/vmx/nested.c
@@ -939,11 +939,11 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
 	 * VM-exit in L0, use the more accurate value.
 	 */
 	if (msr_index == MSR_IA32_TSC) {
-		int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
-					       MSR_IA32_TSC);
+		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
+						    MSR_IA32_TSC);
 
-		if (index >= 0) {
-			u64 val = vmx->msr_autostore.guest.val[index].value;
+		if (i >= 0) {
+			u64 val = vmx->msr_autostore.guest.val[i].value;
 
 			*data = kvm_read_l1_tsc(vcpu, val);
 			return true;
@@ -1032,12 +1032,12 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
 	bool in_vmcs12_store_list;
-	int msr_autostore_index;
+	int msr_autostore_slot;
 	bool in_autostore_list;
 	int last;
 
-	msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
-	in_autostore_list = msr_autostore_index >= 0;
+	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
+	in_autostore_list = msr_autostore_slot >= 0;
 	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
 
 	if (in_vmcs12_store_list && !in_autostore_list) {
@@ -1058,7 +1058,7 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
 		autostore->val[last].index = msr_index;
 	} else if (!in_vmcs12_store_list && in_autostore_list) {
 		last = --autostore->nr;
-		autostore->val[msr_autostore_index] = autostore->val[last];
+		autostore->val[msr_autostore_slot] = autostore->val[last];
 	}
 }
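
An aside on the last hunk above: prepare_vmx_msr_autostore_list() drops an entry with the classic unordered-array trick, overwriting the found slot with the last entry and shrinking the count. A sketch reusing the mock types from earlier (vmx_msrs_remove_slot is a hypothetical name; the kernel open-codes this logic rather than wrapping it in a helper):

/* Hypothetical helper, not in the kernel: remove the entry at @slot in
 * O(1) by moving the last entry into it and shrinking the array. Entry
 * order is not preserved, which is fine for the MSR load/store lists.
 */
static void vmx_msrs_remove_slot(struct vmx_msrs *m, int slot)
{
	int last = --m->nr;

	m->val[slot] = m->val[last];
}
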
arch/x86/kvm/vmx/vmx.c
@@ -812,7 +812,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 	vm_exit_controls_clearbit(vmx, exit);
 }
 
-int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
+int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
 {
 	unsigned int i;
 
@@ -846,7 +846,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 		}
 		break;
 	}
-	i = vmx_find_msr_index(&m->guest, msr);
+	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
 	if (i < 0)
 		goto skip_guest;
 	--m->guest.nr;
@@ -854,7 +854,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
 
 skip_guest:
-	i = vmx_find_msr_index(&m->host, msr);
+	i = vmx_find_loadstore_msr_slot(&m->host, msr);
 	if (i < 0)
 		return;
@@ -913,9 +913,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 			wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	i = vmx_find_msr_index(&m->guest, msr);
+	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
 	if (!entry_only)
-		j = vmx_find_msr_index(&m->host, msr);
+		j = vmx_find_loadstore_msr_slot(&m->host, msr);
 
 	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
 	    (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
arch/x86/kvm/vmx/vmx.h
@@ -332,7 +332,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
-int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
+int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
 static inline u8 vmx_get_rvi(void)