Commit c47f892d authored by Srikar Dronamraju, committed by Michael Ellerman

powerpc/smp: Reintroduce cpu_core_mask

Daniel reported that with commit 4ca234a9 ("powerpc/smp: Stop
updating cpu_core_mask") QEMU was unable to set single-NUMA-node SMP
topologies such as:
 -smp 8,maxcpus=8,cores=2,threads=2,sockets=2
i.e. he expected 2 sockets in one NUMA node.

The above commit helped reduce boot time on large systems, for
example a 4096 vCPU single-socket QEMU instance. PAPR is silent on
having more than one socket within a NUMA node.

For any CPU, cpu_core_mask and cpu_cpu_mask would be the same unless
the number of sockets differs from the number of NUMA nodes.
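
For instance, with the -smp line above, cpu_cpu_mask() of any of the
8 CPUs covers the whole NUMA node (all 8 CPUs), while cpu_core_mask()
should cover only the 4 threads of that CPU's socket; with one socket
per NUMA node the two masks would coincide.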

One option is to reintroduce cpu_core_mask but use a slightly
different method to arrive at it. Previously, each CPU's chip-id
would be compared with every other CPU's chip-id to verify whether
the two CPUs were related at the chip level. Now, once a CPU 'A' is
found related / (unrelated) to another CPU 'B', all the thread
siblings of 'A' and all the thread siblings of 'B' are automatically
marked as related / (unrelated), as sketched below.
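
In outline, the new loop in add_cpu_to_masks() below does this with
the existing cpumask helpers (a simplified restatement of the hunk,
where submask_fn is cpu_sibling_mask, or cpu_l2_cache_mask on
shared-cache systems, and mask is a scratch cpumask):

	/* Start from every online CPU not yet in this CPU's core mask */
	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

	for_each_cpu(i, mask) {
		if (chip_id == cpu_to_chip_id(i)) {
			/* Same chip: pull in i's whole submask at once */
			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			/* Different chip: skip i's whole core mask */
			cpumask_andnot(mask, mask, cpu_core_mask(i));
		}
	}

Each iteration removes an entire submask from 'mask', so the chip-id
comparison runs once per thread group (or L2 group) rather than once
per CPU.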

Also, if a platform doesn't support the ibm,chip-id property, i.e.
its cpu_to_chip_id() returns -1, cpu_core_map holds a copy of
cpu_cpu_mask().
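
In the diff below this is the early-exit path; 'ret' holds the
result of the GFP_ATOMIC cpumask allocation, so an allocation
failure takes the same fallback:

	if (chip_id == -1 || !ret) {
		/* No usable chip-id (or no scratch mask): snapshot cpu_cpu_mask() */
		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
		goto out;
	}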

Fixes: 4ca234a9 ("powerpc/smp: Stop updating cpu_core_mask")
Reported-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Tested-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210415120934.232271-2-srikar@linux.vnet.ibm.com
parent e9e16917
@@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
 	return per_cpu(cpu_sibling_map, cpu);
 }
 
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+	return per_cpu(cpu_core_map, cpu);
+}
+
 static inline struct cpumask *cpu_l2_cache_mask(int cpu)
 {
 	return per_cpu(cpu_l2_cache_map, cpu);
@@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 				local_memory_node(numa_cpu_lookup_table[cpu]));
 		}
 #endif
-		/*
-		 * cpu_core_map is now more updated and exists only since
-		 * its been exported for long. It only will have a snapshot
-		 * of cpu_cpu_mask.
-		 */
-		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
 	}
 
 	/* Init the cpumasks so the boot CPU is related to itself */
 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
+	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 
 	if (has_coregroup_support())
 		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
@@ -1408,6 +1403,9 @@ static void remove_cpu_from_masks(int cpu)
 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
 	}
 
+	for_each_cpu(i, cpu_core_mask(cpu))
+		set_cpus_unrelated(cpu, i, cpu_core_mask);
+
 	if (has_coregroup_support()) {
 		for_each_cpu(i, cpu_coregroup_mask(cpu))
 			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
@@ -1468,8 +1466,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
 
 static void add_cpu_to_masks(int cpu)
 {
+	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
 	int first_thread = cpu_first_thread_sibling(cpu);
+	int chip_id = cpu_to_chip_id(cpu);
 	cpumask_var_t mask;
+	bool ret;
 	int i;
 
 	/*
@@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
 	add_cpu_to_smallcore_masks(cpu);
 
 	/* In CPU-hotplug path, hence use GFP_ATOMIC */
-	alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
+	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
 	update_mask_by_l2(cpu, &mask);
 
 	if (has_coregroup_support())
 		update_coregroup_mask(cpu, &mask);
 
+	if (chip_id == -1 || !ret) {
+		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
+		goto out;
+	}
+
+	if (shared_caches)
+		submask_fn = cpu_l2_cache_mask;
+
+	/* Update core_mask with all the CPUs that are part of submask */
+	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
+
+	/* Skip all CPUs already part of current CPU core mask */
+	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
+
+	for_each_cpu(i, mask) {
+		if (chip_id == cpu_to_chip_id(i)) {
+			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
+			cpumask_andnot(mask, mask, submask_fn(i));
+		} else {
+			cpumask_andnot(mask, mask, cpu_core_mask(i));
+		}
+	}
+
+out:
 	free_cpumask_var(mask);
 }