Commit 2a636a56 authored by Oliver O'Halloran's avatar Oliver O'Halloran Committed by Michael Ellerman

powerpc/smp: Add cpu_l2_cache_map

We want to add an extra level to the CPU scheduler topology to account
for cores which share a cache. To do this we need to build a cpumask
for each CPU that indicates which CPUs share this cache to use as an
input to the scheduler.
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent df52f671
...@@ -97,6 +97,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) ...@@ -97,6 +97,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#endif #endif
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
static inline struct cpumask *cpu_sibling_mask(int cpu) static inline struct cpumask *cpu_sibling_mask(int cpu)
...@@ -109,6 +110,11 @@ static inline struct cpumask *cpu_core_mask(int cpu) ...@@ -109,6 +110,11 @@ static inline struct cpumask *cpu_core_mask(int cpu)
return per_cpu(cpu_core_map, cpu); return per_cpu(cpu_core_map, cpu);
} }
/*
 * Return the per-CPU cpumask of CPUs that share an L2 cache with @cpu.
 * The mask includes @cpu itself once initialized (see smp_prepare_cpus()
 * and add_cpu_to_masks() in smp.c). Backed by the cpu_l2_cache_map
 * per-CPU variable declared above.
 */
static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
return per_cpu(cpu_l2_cache_map, cpu);
}
extern int cpu_to_core_id(int cpu); extern int cpu_to_core_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
......
...@@ -75,9 +75,11 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; ...@@ -75,9 +75,11 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 };
struct thread_info *secondary_ti; struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */ /* SMP operations for this machine */
...@@ -610,6 +612,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -610,6 +612,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu)); GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu)); GFP_KERNEL, cpu_to_node(cpu));
/* /*
...@@ -624,6 +628,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -624,6 +628,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* Init the cpumasks so the boot CPU is related to itself */ /* Init the cpumasks so the boot CPU is related to itself */
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (smp_ops && smp_ops->probe) if (smp_ops && smp_ops->probe)
...@@ -907,6 +912,7 @@ static void remove_cpu_from_masks(int cpu) ...@@ -907,6 +912,7 @@ static void remove_cpu_from_masks(int cpu)
/* NB: cpu_core_mask is a superset of the others */ /* NB: cpu_core_mask is a superset of the others */
for_each_cpu(i, cpu_core_mask(cpu)) { for_each_cpu(i, cpu_core_mask(cpu)) {
set_cpus_unrelated(cpu, i, cpu_core_mask); set_cpus_unrelated(cpu, i, cpu_core_mask);
set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
set_cpus_unrelated(cpu, i, cpu_sibling_mask); set_cpus_unrelated(cpu, i, cpu_sibling_mask);
} }
} }
...@@ -929,17 +935,22 @@ static void add_cpu_to_masks(int cpu) ...@@ -929,17 +935,22 @@ static void add_cpu_to_masks(int cpu)
set_cpus_related(i, cpu, cpu_sibling_mask); set_cpus_related(i, cpu, cpu_sibling_mask);
/* /*
* Copy the thread sibling into core sibling mask, and * Copy the thread sibling mask into the cache sibling mask
* add CPUs that share a chip or an L2 to the core sibling * and mark any CPUs that share an L2 with this CPU.
* mask.
*/ */
for_each_cpu(i, cpu_sibling_mask(cpu)) for_each_cpu(i, cpu_sibling_mask(cpu))
set_cpus_related(cpu, i, cpu_l2_cache_mask);
update_mask_by_l2(cpu, cpu_l2_cache_mask);
/*
* Copy the cache sibling mask into core sibling mask and mark
* any CPUs on the same chip as this CPU.
*/
for_each_cpu(i, cpu_l2_cache_mask(cpu))
set_cpus_related(cpu, i, cpu_core_mask); set_cpus_related(cpu, i, cpu_core_mask);
if (chipid == -1) { if (chipid == -1)
update_mask_by_l2(cpu, cpu_core_mask);
return; return;
}
for_each_cpu(i, cpu_online_mask) for_each_cpu(i, cpu_online_mask)
if (cpu_to_chip_id(i) == chipid) if (cpu_to_chip_id(i) == chipid)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment