Commit 22d63660 authored by Thomas Gleixner

x86/cpu: Use common topology code for Intel

Intel CPUs use either topology leaf 0xb/0x1f evaluation or the legacy
SMP/HT evaluation based on CPUID leaf 0x1/0x4.

Move it over to the consolidated topology code and remove the random
topology hacks which are sprinkled into the Intel and the common code.

No functional change intended.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Michael Kelley <mhklinux@outlook.com>
Tested-by: Zhang Rui <rui.zhang@intel.com>
Tested-by: Wang Wendy <wendy.wang@intel.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20240212153624.893644349@linutronix.de
parent 3d410094
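As background for the diff below: the extended topology leaves (0x1f, with 0xb as the older variant) enumerate one topology level per subleaf, subleaf 0 being the SMT level. Here is a minimal user-space sketch of that probing order, written against GCC's <cpuid.h> rather than the kernel's cpuid helpers; the program structure and output strings are illustrative assumptions, not kernel code.

/*
 * Sketch: probe leaf 0x1f first, fall back to 0xb, then report the
 * SMT-level data the topology parser consumes. Assumes GCC/clang
 * <cpuid.h>; build with: cc -o topo topo.c
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, leaf = 0x1f;

	/* Leaf 0x1f supersedes 0xb; a zero EBX at subleaf 0 means the leaf is invalid. */
	if (!__get_cpuid_count(leaf, 0, &eax, &ebx, &ecx, &edx) || !ebx)
		leaf = 0xb;

	if (!__get_cpuid_count(leaf, 0, &eax, &ebx, &ecx, &edx) || !ebx) {
		printf("no extended topology leaf; legacy 0x1/0x4 path applies\n");
		return 0;
	}

	/*
	 * Subleaf 0 is the SMT level: EDX is the 32-bit x2APIC ID,
	 * EBX[15:0] the logical CPU count at this level, and EAX[4:0]
	 * the APIC ID shift to reach the next level.
	 */
	printf("leaf 0x%x: x2APIC id %u, SMT siblings %u, shift %u\n",
	       leaf, edx, ebx & 0xffff, eax & 0x1f);
	return 0;
}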
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -793,19 +793,6 @@ static void get_model_name(struct cpuinfo_x86 *c)
 	*(s + 1) = '\0';
 }
 
-void detect_num_cpu_cores(struct cpuinfo_x86 *c)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	c->x86_max_cores = 1;
-	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
-		return;
-
-	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
-	if (eax & 0x1f)
-		c->x86_max_cores = (eax >> 26) + 1;
-}
-
 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -867,54 +854,6 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-int detect_ht_early(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-	u32 eax, ebx, ecx, edx;
-
-	if (!cpu_has(c, X86_FEATURE_HT))
-		return -1;
-
-	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return -1;
-
-	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
-		return -1;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
-	if (smp_num_siblings == 1)
-		pr_info_once("CPU0: Hyper-Threading is disabled\n");
-#endif
-	return 0;
-}
-
-void detect_ht(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-	int index_msb, core_bits;
-
-	if (topo_is_converted(c))
-		return;
-
-	if (detect_ht_early(c) < 0)
-		return;
-
-	index_msb = get_count_order(smp_num_siblings);
-	c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
-
-	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-	index_msb = get_count_order(smp_num_siblings);
-
-	core_bits = get_count_order(c->x86_max_cores);
-
-	c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
-		((1 << core_bits) - 1);
-#endif
-}
-
 static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
@@ -1899,10 +1838,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 		       c->x86, c->x86_model);
 	}
 
-#ifdef CONFIG_X86_64
-	detect_ht(c);
-#endif
-
 	x86_init_rdrand(c);
 	setup_pku(c);
 	setup_cet(c);
...
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -76,11 +76,7 @@ extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);
 
-extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
-extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
 extern int detect_extended_topology(struct cpuinfo_x86 *c);
-extern int detect_ht_early(struct cpuinfo_x86 *c);
-extern void detect_ht(struct cpuinfo_x86 *c);
 extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
 
 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c);
...
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -315,13 +315,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	}
 
 	check_memory_type_self_snoop_errata(c);
-
-	/*
-	 * Get the number of SMT siblings early from the extended topology
-	 * leaf, if available. Otherwise try the legacy SMT detection.
-	 */
-	if (detect_extended_topology_early(c) < 0)
-		detect_ht_early(c);
 }
 
 static void bsp_init_intel(struct cpuinfo_x86 *c)
@@ -603,24 +596,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 	intel_workarounds(c);
 
-	/*
-	 * Detect the extended topology information if available. This
-	 * will reinitialise the initial_apicid which will be used
-	 * in init_intel_cacheinfo()
-	 */
-	detect_extended_topology(c);
-
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		detect_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	init_intel_cacheinfo(c);
 
 	if (c->cpuid_level > 9) {
...
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -59,28 +59,6 @@ static int detect_extended_topology_leaf(struct cpuinfo_x86 *c)
 }
 #endif
 
-int detect_extended_topology_early(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-	unsigned int eax, ebx, ecx, edx;
-	int leaf;
-
-	leaf = detect_extended_topology_leaf(c);
-	if (leaf < 0)
-		return -1;
-
-	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
-
-	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
-	/*
-	 * initial apic id, which also represents 32-bit extended x2apic id.
-	 */
-	c->topo.initial_apicid = edx;
-	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
-#endif
-	return 0;
-}
-
 /*
  * Check for extended topology enumeration cpuid leaf, and if it
  * exists, use it for populating initial_apicid and cpu topology
...
--- a/arch/x86/kernel/cpu/topology_common.c
+++ b/arch/x86/kernel/cpu/topology_common.c
@@ -71,7 +71,6 @@ bool topo_is_converted(struct cpuinfo_x86 *c)
 	/* Temporary until everything is converted over. */
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
-	case X86_VENDOR_INTEL:
 	case X86_VENDOR_HYGON:
 		return false;
 	default:
@@ -136,6 +135,10 @@ static void parse_topology(struct topo_scan *tscan, bool early)
 	case X86_VENDOR_ZHAOXIN:
 		parse_legacy(tscan);
 		break;
+	case X86_VENDOR_INTEL:
+		if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
+			parse_legacy(tscan);
+		break;
 	}
 }
...
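With the hunk above, Intel parsing first tries cpu_parse_topology_ext() and falls back to parse_legacy() only when no extended leaf is usable, replacing the removed detect_ht_early()/detect_num_cpu_cores() pair. Below is a rough user-space approximation of that legacy fallback; it again assumes GCC's <cpuid.h> and is an illustration of the CPUID bit layout, not the kernel's implementation.

/*
 * Sketch of the legacy 0x1/0x4 topology evaluation that parse_legacy()
 * covers for Intel after this change.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EDX[28] is the HT feature flag checked by the removed detect_ht_early(). */
	if (!(edx & (1u << 28))) {
		printf("no HT enumeration; a single sibling is assumed\n");
		return 0;
	}
	/* EBX[23:16]: addressable logical CPUs per package, the value that
	 * used to land in smp_num_siblings. */
	printf("logical CPUs per package: %u\n", (ebx >> 16) & 0xff);

	/* Leaf 0x4 subleaf 0: EAX[31:26] encodes cores per package minus one,
	 * valid only when EAX[4:0] (the cache type field) is non-zero,
	 * mirroring the removed detect_num_cpu_cores(). */
	if (__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx) && (eax & 0x1f))
		printf("cores per package: %u\n", (eax >> 26) + 1);
	return 0;
}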