Commit 9df56f19 authored by Jason Wang, committed by H. Peter Anvin

x86: Correctly detect hypervisor

We currently try to handle hypervisor compatibility mode by detecting hypervisors
in a specific order. This is not robust, since hypervisors may implement each
other's features.

This patch handles the situation by always choosing the hypervisor whose signature
appears last in the CPUID leaves. This is done by letting .detect() return a
priority instead of true/false, reusing the CPUID leaf where the signature was
found as the priority (or 1 if it was found through DMI). We then simply pick the
hypervisor with the highest priority. Other, more sophisticated detection methods
could be implemented on top.
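For illustration only (not part of this patch): a detect() routine can derive such a priority by scanning the hypervisor CPUID range for its signature and returning the base leaf where the signature was found. A minimal sketch, assuming a hypothetical "ExampleHvSig" signature string and the kernel's cpuid() helper:

	static uint32_t __init example_cpuid_base(void)
	{
		uint32_t base, eax, signature[3];

		/*
		 * Hypervisor CPUID leaves start at 0x40000000 and are spaced
		 * 0x100 apart; a hypervisor that emulates another one usually
		 * keeps its own signature at a higher base, so the base leaf
		 * itself works as a priority.
		 */
		for (base = 0x40000000; base < 0x40010000; base += 0x100) {
			cpuid(base, &eax, &signature[0],
			      &signature[1], &signature[2]);

			if (!memcmp("ExampleHvSig", signature, 12))
				return base;	/* higher leaf => higher priority */
		}

		return 0;	/* signature not found: not this hypervisor */
	}

For example, when KVM exposes Hyper-V compatibility leaves at 0x40000000, its own "KVMKVMKVM" signature is typically moved up to 0x40000100, so kvm_detect() reports the higher leaf and wins over the Hyper-V driver.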

Suggested by H. Peter Anvin and Paolo Bonzini.
Acked-by: K. Y. Srinivasan <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Doug Covelli <dcovelli@vmware.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Hecht <dhecht@vmware.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: http://lkml.kernel.org/r/1374742475-2485-4-git-send-email-jasowang@redhat.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 1085ba7f
arch/x86/include/asm/hypervisor.h
@@ -33,7 +33,7 @@ struct hypervisor_x86 {
 	const char	*name;
 
 	/* Detection routine */
-	bool		(*detect)(void);
+	uint32_t	(*detect)(void);
 
 	/* Adjust CPU feature bits (run once per CPU) */
 	void		(*set_cpu_features)(struct cpuinfo_x86 *);
arch/x86/kernel/cpu/hypervisor.c
@@ -25,11 +25,6 @@
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 
-/*
- * Hypervisor detect order. This is specified explicitly here because
- * some hypervisors might implement compatibility modes for other
- * hypervisors and therefore need to be detected in specific sequence.
- */
 static const __initconst struct hypervisor_x86 * const hypervisors[] =
 {
 #ifdef CONFIG_XEN_PVHVM
@@ -49,15 +44,19 @@ static inline void __init
 detect_hypervisor_vendor(void)
 {
 	const struct hypervisor_x86 *h, * const *p;
+	uint32_t pri, max_pri = 0;
 
 	for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
 		h = *p;
-		if (h->detect()) {
+		pri = h->detect();
+		if (pri != 0 && pri > max_pri) {
+			max_pri = pri;
 			x86_hyper = h;
-			printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
-			break;
 		}
 	}
+
+	if (max_pri)
+		printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
 }
 
 void init_hypervisor(struct cpuinfo_x86 *c)
arch/x86/kernel/cpu/mshyperv.c
@@ -27,20 +27,23 @@
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
-static bool __init ms_hyperv_platform(void)
+static uint32_t __init ms_hyperv_platform(void)
 {
 	u32 eax;
 	u32 hyp_signature[3];
 
 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		return false;
+		return 0;
 
 	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
 	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
-	return eax >= HYPERV_CPUID_MIN &&
-		eax <= HYPERV_CPUID_MAX &&
-		!memcmp("Microsoft Hv", hyp_signature, 12);
+	if (eax >= HYPERV_CPUID_MIN &&
+	    eax <= HYPERV_CPUID_MAX &&
+	    !memcmp("Microsoft Hv", hyp_signature, 12))
+		return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+
+	return 0;
 }
 
 static cycle_t read_hv_clock(struct clocksource *arg)
arch/x86/kernel/cpu/vmware.c
@@ -93,7 +93,7 @@ static void __init vmware_platform_setup(void)
  * serial key should be enough, as this will always have a VMware
  * specific string when running under VMware hypervisor.
  */
-static bool __init vmware_platform(void)
+static uint32_t __init vmware_platform(void)
 {
 	if (cpu_has_hypervisor) {
 		unsigned int eax;
@@ -102,12 +102,12 @@ static bool __init vmware_platform(void)
 		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
 		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
 		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
-			return true;
+			return CPUID_VMWARE_INFO_LEAF;
 	} else if (dmi_available && dmi_name_in_serial("VMware") &&
 		   __vmware_platform())
-		return true;
+		return 1;
 
-	return false;
+	return 0;
 }
 
 /*
arch/x86/kernel/kvm.c
@@ -498,11 +498,9 @@ void __init kvm_guest_init(void)
 #endif
 }
 
-static bool __init kvm_detect(void)
+static uint32_t __init kvm_detect(void)
 {
-	if (!kvm_para_available())
-		return false;
-	return true;
+	return kvm_cpuid_base();
 }
 
 const struct hypervisor_x86 x86_hyper_kvm __refconst = {
arch/x86/xen/enlighten.c
@@ -1720,15 +1720,12 @@ static void __init xen_hvm_guest_init(void)
 	xen_hvm_init_mmu_ops();
 }
 
-static bool __init xen_hvm_platform(void)
+static uint32_t __init xen_hvm_platform(void)
 {
 	if (xen_pv_domain())
-		return false;
-
-	if (!xen_cpuid_base())
-		return false;
-
-	return true;
+		return 0;
+
+	return xen_cpuid_base();
 }
 
 bool xen_hvm_need_lapic(void)
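As a further illustration, not taken from the diff above: with the new prototype, a hypervisor driver's table entry simply wires a leaf-returning detect() callback into struct hypervisor_x86. The names below are hypothetical, reusing the example_cpuid_base() sketch from earlier:

	static uint32_t __init examplehv_detect(void)
	{
		/* 0 when the signature is absent, otherwise the CPUID base
		 * leaf, which doubles as the detection priority. */
		return example_cpuid_base();
	}

	const struct hypervisor_x86 x86_hyper_examplehv __refconst = {
		.name	= "ExampleHV",
		.detect	= examplehv_detect,
	};

detect_hypervisor_vendor() then picks whichever entry in the hypervisors[] table reported the highest leaf.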