Commit 66a6950f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Introduce kvm_cpu_caps to replace runtime CPUID masking

Calculate the CPUID masks for KVM_GET_SUPPORTED_CPUID at load time using
what is effectively a KVM-adjusted copy of boot_cpu_data, or more
precisely, the x86_capability array in boot_cpu_data.

In terms of KVM support, the vast majority of CPUID feature bits are
constant, and *all* feature support is known at KVM load time.  Rather
than masking CPUID registers at runtime against boot_cpu_data, which is
effectively read-only after init, copy it into a KVM-specific array and
use *that* to mask CPUID registers.
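
As a rough sketch of the idea (assumed shape only, not a hunk from this
patch), the load-time step boils down to snapshotting the boot CPU's
capability words into a KVM-owned array once, at module load:

/*
 * Rough sketch: take a one-time snapshot of the boot CPU's capability
 * words; KVM and the vendor modules then adjust this copy before any
 * CPUID data is reported to userspace.
 */
u32 kvm_cpu_caps[NCAPINTS] __read_mostly;

void kvm_set_cpu_caps(void)
{
	/* The snapshot must not read past the source array. */
	BUILD_BUG_ON(sizeof(kvm_cpu_caps) > sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps));

	/* Per-leaf KVM adjustments are applied here. */
}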

In addition to consolidating the masking, kvm_cpu_caps can be adjusted
by SVM/VMX at load time, thus eliminating all feature bit manipulation
in ->set_supported_cpuid().
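
For illustration only (this is not part of the patch), a vendor module
could use the new helpers along these lines during its hardware setup;
example_adjust_cpu_caps() and vendor_supports_xsaves() are hypothetical
names:

/*
 * Hypothetical example: adjust the KVM-wide caps once at load time
 * instead of editing each guest's CPUID entries in ->set_supported_cpuid().
 */
static __init void example_adjust_cpu_caps(void)	/* hypothetical helper */
{
	/* Hide a feature this vendor module cannot virtualize. */
	if (!vendor_supports_xsaves())			/* hypothetical check */
		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

	/* Advertise a feature that is emulated regardless of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);
}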

Opportunistically clean up a few warts:

  - Replace bare "unsigned" with "unsigned int" when a feature flag is
    captured in a local variable, e.g. f_nx.

  - Sort the CPUID masks by function, index and register (alphabetically
    for registers, i.e. EBX comes before ECX/EDX).

  - Remove the superfluous /* cpuid 7.0.ecx */ comments.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
[Call kvm_set_cpu_caps from kvm_x86_ops->hardware_setup due to fixed
 GBPAGES patch. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9e6d01c2
@@ -6,6 +6,9 @@
 #include <asm/cpu.h>
 #include <asm/processor.h>
 
+extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+void kvm_set_cpu_caps(void);
+
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
@@ -172,7 +175,8 @@ static __always_inline void cpuid_entry_mask(struct kvm_cpuid_entry2 *entry,
 {
 	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
 
-	*reg &= boot_cpu_data.x86_capability[leaf];
+	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
+	*reg &= kvm_cpu_caps[leaf];
 }
 
 static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
@@ -262,4 +266,20 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
 		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
 }
 
+static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
+}
+
 #endif
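
For context (not part of this diff), callers pass a reverse-CPUID leaf
constant, so trimming a KVM_GET_SUPPORTED_CPUID entry follows roughly this
pattern; the wrapper function below is hypothetical, while CPUID_7_0_EBX,
CPUID_7_ECX and CPUID_7_EDX are the kernel's existing leaf constants:

/* Hypothetical wrapper showing the cpuid_entry_mask() call pattern. */
static void example_mask_leaf_7(struct kvm_cpuid_entry2 *entry)
{
	cpuid_entry_mask(entry, CPUID_7_0_EBX);
	cpuid_entry_mask(entry, CPUID_7_ECX);
	cpuid_entry_mask(entry, CPUID_7_EDX);
}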
@@ -1479,6 +1479,8 @@ static __init int svm_hardware_setup(void)
 		pr_info("Virtual GIF supported\n");
 	}
 
+	kvm_set_cpu_caps();
+
 	return 0;
 
 err:
@@ -7818,6 +7818,8 @@ static __init int hardware_setup(void)
 		return r;
 	}
 
+	kvm_set_cpu_caps();
+
 	r = alloc_kvm_area();
 	if (r)
 		nested_vmx_hardware_unsetup();