Commit 1c300a40 authored by Paolo Bonzini

x86, kvm: cache the base of the KVM cpuid leaves

It is unnecessary to go through hypervisor_cpuid_base every time
a leaf is found (which will be every time a feature is requested
after the next patch).

Fixes: 1085ba7f
Cc: stable@vger.kernel.org
Cc: mtosatti@redhat.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5f66b620
...@@ -85,28 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, ...@@ -85,28 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
return ret; return ret;
} }
/*
 * Return the base of the KVM CPUID signature leaves, or 0 when we are
 * not running as a KVM guest.
 */
static inline uint32_t kvm_cpuid_base(void)
{
	/* Old processors report a negative cpuid_level; don't blow up there. */
	if (boot_cpu_data.cpuid_level >= 0 && cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}
/* True when the KVM signature leaf is present, i.e. we run under KVM. */
static inline bool kvm_para_available(void)
{
	if (kvm_cpuid_base())
		return true;

	return false;
}
static inline unsigned int kvm_arch_para_features(void) static inline unsigned int kvm_arch_para_features(void)
{ {
return cpuid_eax(KVM_CPUID_FEATURES); return cpuid_eax(KVM_CPUID_FEATURES);
} }
#ifdef CONFIG_KVM_GUEST #ifdef CONFIG_KVM_GUEST
bool kvm_para_available(void);
void __init kvm_guest_init(void); void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token); void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token); void kvm_async_pf_task_wake(u32 token);
...@@ -126,6 +111,11 @@ static inline void kvm_spinlock_init(void) ...@@ -126,6 +111,11 @@ static inline void kvm_spinlock_init(void)
#define kvm_async_pf_task_wait(T) do {} while(0) #define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0) #define kvm_async_pf_task_wake(T) do {} while(0)
/*
 * !CONFIG_KVM_GUEST stub: paravirtual KVM features are never available,
 * so callers always see "no KVM".
 */
static inline bool kvm_para_available(void)
{
	/* Use the bool literal rather than the bare integer 0. */
	return false;
}
static inline u32 kvm_read_and_reset_pf_reason(void) static inline u32 kvm_read_and_reset_pf_reason(void)
{ {
return 0; return 0;
......
...@@ -500,6 +500,33 @@ void __init kvm_guest_init(void) ...@@ -500,6 +500,33 @@ void __init kvm_guest_init(void)
#endif #endif
} }
/*
 * Probe the hypervisor CPUID range for the "KVMKVMKVM" signature.
 * Returns the base leaf, or 0 when not running under KVM.
 * noinline: keep the probe out of line so the cached wrapper stays small.
 */
static noinline uint32_t __kvm_cpuid_base(void)
{
	/* Old processors report a negative cpuid_level; don't blow up there. */
	if (boot_cpu_data.cpuid_level >= 0 && cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}
/*
 * Cached wrapper around __kvm_cpuid_base(): the CPUID probe runs at most
 * once and its result is remembered for all subsequent callers.
 *
 * NOTE(review): the lazy initialization is not thread-safe; presumably
 * the first call happens during early boot before SMP is up -- confirm.
 */
static inline uint32_t kvm_cpuid_base(void)
{
	/*
	 * -1 means "not probed yet".  Renamed from the original
	 * 'kvm_cpuid_base', which shadowed the function's own name
	 * (legal C, but confusing and flagged by -Wshadow).
	 */
	static int cached_base = -1;

	if (cached_base == -1)
		cached_base = __kvm_cpuid_base();

	return cached_base;
}
/* Report whether we are running as a KVM guest (signature leaf found). */
bool kvm_para_available(void)
{
	return !!kvm_cpuid_base();
}
EXPORT_SYMBOL_GPL(kvm_para_available);
static uint32_t __init kvm_detect(void) static uint32_t __init kvm_detect(void)
{ {
return kvm_cpuid_base(); return kvm_cpuid_base();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment