Commit 4e66c0cb authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Add support for reverse CPUID lookup of scattered features

Introduce a scheme that allows KVM's CPUID magic to support features
that are scattered in the kernel's feature words.  To advertise and/or
query guest support for CPUID-based features, KVM requires the bit
number of an X86_FEATURE_* to match the bit number in its associated
CPUID entry.  For scattered features, this does not hold true.
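
As a concrete illustration of that constraint, here is a user-space sketch (not part of this patch; the word index and the feature_bit() helper mirror the kernel's cpufeatures.h / feature_bit() layout but are redefined locally for the demo):

  #include <assert.h>

  #define CPUID_1_ECX             4                        /* kernel word holding CPUID.1:ECX */
  #define X86_FEATURE_XSAVE       (CPUID_1_ECX * 32 + 26)  /* kernel bit 26 == CPUID.1:ECX[26] */

  /* KVM's F()/feature_bit() style helper: the low 5 bits must be the hardware bit number. */
  #define feature_bit(f)          (1u << ((f) & 31))

  int main(void)
  {
          /* Holds only because the kernel bit number matches the CPUID bit. */
          assert(feature_bit(X86_FEATURE_XSAVE) == (1u << 26));

          /*
           * A scattered feature is collected into a Linux-defined word at an
           * arbitrary bit, so this identity breaks and KVM needs a translation
           * from the kernel-defined flag to a hardware-aligned one.
           */
          return 0;
  }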

Add a framework to allow defining KVM-only words, stored in
kvm_cpu_caps after the shared kernel caps, that can be used to gather
the scattered feature bits by translating X86_FEATURE_* flags into their
KVM-defined feature.
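
For example, once a scattered feature actually needs to be exposed, a KVM-only word and its translation could be wired up as sketched below. This is a hypothetical use of the framework, not code from this patch: CPUID_12_EAX and KVM_X86_FEATURE_SGX1 are illustrative names, the reverse_cpuid[] table would also need an entry for the new word, and a scattered kernel-side X86_FEATURE_SGX1 flag is assumed to exist.

  enum kvm_only_cpuid_leafs {
          CPUID_12_EAX = NCAPINTS,        /* first word after the shared kernel caps */
          NR_KVM_CPU_CAPS,
          NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
  };

  /* KVM-defined flag whose bit number matches CPUID.0x12.0:EAX[0] in hardware. */
  #define KVM_X86_FEATURE_SGX1    X86_KVM_FEATURE(CPUID_12_EAX, 0)

  /* Map the scattered kernel flag onto the hardware-aligned KVM flag. */
  static __always_inline u32 __feature_translate(int x86_feature)
  {
          if (x86_feature == X86_FEATURE_SGX1)
                  return KVM_X86_FEATURE_SGX1;

          return x86_feature;
  }

The intent, as the SF() macro added below suggests, is that kvm_cpu_cap_init_scattered(CPUID_12_EAX, SF(SGX1)) would then populate the KVM-only word, while the existing kvm_cpu_cap_has()/kvm_cpu_cap_set() helpers keep taking X86_FEATURE_SGX1 unchanged.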

Note, because reverse_cpuid_check() effectively forces kvm_cpu_caps
lookups to be resolved at compile time, there is no runtime cost for
translating from kernel-defined to KVM-defined features.
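
A minimal user-space sketch of that compile-time-resolution argument (all names are local to the demo and only mirror the shape of the kernel helpers; none of this is part of the patch):

  #include <stdio.h>

  #define NCAPINTS                20                        /* stand-in for the kernel cap word count */
  #define CPUID_DEMO_LEAF         NCAPINTS                  /* hypothetical KVM-only word */
  #define X86_FEATURE_DEMO        (13 * 32 + 5)             /* made-up scattered kernel flag */
  #define KVM_FEATURE_DEMO        (CPUID_DEMO_LEAF * 32 + 0)

  /* Shaped like __feature_translate(): kernel flag -> KVM flag. */
  static inline unsigned int feature_translate(unsigned int f)
  {
          return f == X86_FEATURE_DEMO ? KVM_FEATURE_DEMO : f;
  }

  int main(void)
  {
          /*
           * With constant inputs the target word index is itself a constant,
           * which is what lets BUILD_BUG_ON()-based checks such as
           * reverse_cpuid_check() consume it at build time; _Static_assert
           * plays that role here.
           */
          _Static_assert(KVM_FEATURE_DEMO / 32 == NCAPINTS, "KVM-only word follows the kernel caps");
          printf("leaf = %u\n", feature_translate(X86_FEATURE_DEMO) / 32);
          return 0;
  }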

More details here: https://lkml.kernel.org/r/X/jxCOLG+HUO4QlZ@google.com

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Kai Huang <kai.huang@intel.com>
Message-Id: <16cad8d00475f67867fb36701fc7fb7c1ec86ce1.1618196135.git.kai.huang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 00e7646c
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -28,7 +28,7 @@
  * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
  * aligned to sizeof(unsigned long) because it's not accessed via bitops.
  */
-u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_cpu_caps);
 
 static u32 xstate_required_size(u64 xstate_bv, bool compacted)
@@ -53,6 +53,7 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
 }
 
 #define F feature_bit
+#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
 
 static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
@@ -347,13 +348,13 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 	return r;
 }
 
-static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
+static __always_inline void __kvm_cpu_cap_mask(enum cpuid_leafs leaf)
 {
 	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
 	struct kvm_cpuid_entry2 entry;
 
 	reverse_cpuid_check(leaf);
-	kvm_cpu_caps[leaf] &= mask;
 
 	cpuid_count(cpuid.function, cpuid.index,
 		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
@@ -361,6 +362,26 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
 }
 
+static __always_inline void kvm_cpu_cap_init_scattered(enum cpuid_leafs leaf, u32 mask)
+{
+	/* Use kvm_cpu_cap_mask for non-scattered leafs. */
+	BUILD_BUG_ON(leaf < NCAPINTS);
+
+	kvm_cpu_caps[leaf] = mask;
+
+	__kvm_cpu_cap_mask(leaf);
+}
+
+static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+{
+	/* Use kvm_cpu_cap_init_scattered for scattered leafs. */
+	BUILD_BUG_ON(leaf >= NCAPINTS);
+
+	kvm_cpu_caps[leaf] &= mask;
+
+	__kvm_cpu_cap_mask(leaf);
+}
+
 void kvm_set_cpu_caps(void)
 {
 	unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
@@ -371,12 +392,13 @@ void kvm_set_cpu_caps(void)
 	unsigned int f_gbpages = 0;
 	unsigned int f_lm = 0;
 #endif
+	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
 
-	BUILD_BUG_ON(sizeof(kvm_cpu_caps) >
+	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
 		     sizeof(boot_cpu_data.x86_capability));
 	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
-	       sizeof(kvm_cpu_caps));
+	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
 
 	kvm_cpu_cap_mask(CPUID_1_ECX,
 		/*
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -7,7 +7,20 @@
 #include <asm/processor.h>
 #include <uapi/asm/kvm_para.h>
 
-extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+/*
+ * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
+ * be directly used by KVM.  Note, these word values conflict with the kernel's
+ * "bug" caps, but KVM doesn't use those.
+ */
+enum kvm_only_cpuid_leafs {
+	NR_KVM_CPU_CAPS = NCAPINTS,
+	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+};
+
+#define X86_KVM_FEATURE(w, f)	((w)*32 + (f))
+
+extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
 void kvm_set_cpu_caps(void);
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
@@ -100,6 +113,20 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
 	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
 }
 
+/*
+ * Translate feature bits that are scattered in the kernel's cpufeatures word
+ * into KVM feature words that align with hardware's definitions.
+ */
+static __always_inline u32 __feature_translate(int x86_feature)
+{
+	return x86_feature;
+}
+
+static __always_inline u32 __feature_leaf(int x86_feature)
+{
+	return __feature_translate(x86_feature) / 32;
+}
+
 /*
  * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
  * the hardware defined bit number (stored in bits 4:0) and a software defined
@@ -108,6 +135,8 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
  */
 static __always_inline u32 __feature_bit(int x86_feature)
 {
+	x86_feature = __feature_translate(x86_feature);
+
 	reverse_cpuid_check(x86_feature / 32);
 	return 1 << (x86_feature & 31);
 }
@@ -116,7 +145,7 @@ static __always_inline u32 __feature_bit(int x86_feature)
 static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
 {
-	unsigned int x86_leaf = x86_feature / 32;
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
 	reverse_cpuid_check(x86_leaf);
 	return reverse_cpuid[x86_leaf];
@@ -316,7 +345,7 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
 static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
 {
-	unsigned int x86_leaf = x86_feature / 32;
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
 	reverse_cpuid_check(x86_leaf);
 	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
@@ -324,7 +353,7 @@ static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
 static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
 {
-	unsigned int x86_leaf = x86_feature / 32;
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
 	reverse_cpuid_check(x86_leaf);
 	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
@@ -332,7 +361,7 @@ static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
 static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
 {
-	unsigned int x86_leaf = x86_feature / 32;
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
 	reverse_cpuid_check(x86_leaf);
 	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);