Commit ce833b23 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Prepend "MAX_" to MSR array size defines

Add "MAX_" to the LOADSTORE and so-called SHARED MSR defines to make it
clearer that the define controls the array size, as opposed to the
actual number of valid entries in the array.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7e34fbd0
...@@ -1041,7 +1041,7 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, ...@@ -1041,7 +1041,7 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index); in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
if (in_vmcs12_store_list && !in_autostore_list) { if (in_vmcs12_store_list && !in_autostore_list) {
if (autostore->nr == NR_LOADSTORE_MSRS) { if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
/* /*
* Emulated VMEntry does not fail here. Instead a less * Emulated VMEntry does not fail here. Instead a less
* accurate value will be returned by * accurate value will be returned by
......
...@@ -917,8 +917,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, ...@@ -917,8 +917,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
if (!entry_only) if (!entry_only)
j = vmx_find_msr_index(&m->host, msr); j = vmx_find_msr_index(&m->host, msr);
if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) || if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
(j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) { (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
printk_once(KERN_WARNING "Not enough msr switch entries. " printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr); "Can't add msr %x\n", msr);
return; return;
...@@ -6721,7 +6721,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) ...@@ -6721,7 +6721,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
goto free_vpid; goto free_vpid;
} }
BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS); BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != MAX_NR_SHARED_MSRS);
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
u32 index = vmx_msr_index[i]; u32 index = vmx_msr_index[i];
......
...@@ -23,16 +23,16 @@ extern const u32 vmx_msr_index[]; ...@@ -23,16 +23,16 @@ extern const u32 vmx_msr_index[];
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
#define NR_SHARED_MSRS 7 #define MAX_NR_SHARED_MSRS 7
#else #else
#define NR_SHARED_MSRS 4 #define MAX_NR_SHARED_MSRS 4
#endif #endif
#define NR_LOADSTORE_MSRS 8 #define MAX_NR_LOADSTORE_MSRS 8
struct vmx_msrs { struct vmx_msrs {
unsigned int nr; unsigned int nr;
struct vmx_msr_entry val[NR_LOADSTORE_MSRS]; struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
}; };
struct shared_msr_entry { struct shared_msr_entry {
...@@ -196,7 +196,7 @@ struct vcpu_vmx { ...@@ -196,7 +196,7 @@ struct vcpu_vmx {
u32 idt_vectoring_info; u32 idt_vectoring_info;
ulong rflags; ulong rflags;
struct shared_msr_entry guest_msrs[NR_SHARED_MSRS]; struct shared_msr_entry guest_msrs[MAX_NR_SHARED_MSRS];
int nmsrs; int nmsrs;
int save_nmsrs; int save_nmsrs;
bool guest_msrs_ready; bool guest_msrs_ready;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment