Commit fbd2b672 authored by Gautham R. Shenoy, committed by Michael Ellerman

powerpc/smp: Rename init_thread_group_l1_cache_map() to make it generic

init_thread_group_l1_cache_map() initializes the per-cpu cpumask
thread_group_l1_cache_map with the core-siblings which share L1 cache
with the CPU. Make this function generic with respect to the cache
property (L1 or L2) so that it updates the corresponding mask. This is
a preparatory patch for the next patch, which will introduce discovery
of thread-groups that share the L2-cache.

No functional change.
Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1607596739-32439-4-git-send-email-ego@linux.vnet.ibm.com
parent 1fdc1d66
@@ -866,15 +866,18 @@ static struct thread_groups *__init get_thread_groups(int cpu,
 	return tg;
 }
 
-static int init_thread_group_l1_cache_map(int cpu)
+static int __init init_thread_group_cache_map(int cpu, int cache_property)
 {
 	int first_thread = cpu_first_thread_sibling(cpu);
 	int i, cpu_group_start = -1, err = 0;
 	struct thread_groups *tg = NULL;
+	cpumask_var_t *mask;
 
-	tg = get_thread_groups(cpu, THREAD_GROUP_SHARE_L1,
-			       &err);
+	if (cache_property != THREAD_GROUP_SHARE_L1)
+		return -EINVAL;
+
+	tg = get_thread_groups(cpu, cache_property, &err);
 	if (!tg)
 		return err;
@@ -885,8 +888,8 @@ static int init_thread_group_l1_cache_map(int cpu)
 		return -ENODATA;
 	}
 
-	zalloc_cpumask_var_node(&per_cpu(thread_group_l1_cache_map, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+	mask = &per_cpu(thread_group_l1_cache_map, cpu);
+	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 
 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 		int i_group_start = get_cpu_thread_group_start(i, tg);
@@ -897,7 +900,7 @@ static int init_thread_group_l1_cache_map(int cpu)
 		}
 
 		if (i_group_start == cpu_group_start)
-			cpumask_set_cpu(i, per_cpu(thread_group_l1_cache_map, cpu));
 	}
+			cpumask_set_cpu(i, *mask);
 
 	return 0;
@@ -976,7 +979,7 @@ static int init_big_cores(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		int err = init_thread_group_l1_cache_map(cpu);
+		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
 
 		if (err)
 			return err;
...
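For context on where this generalization is headed: once the follow-up patch adds L2 thread-group discovery, the cache-property check above can become a mask selection. A minimal sketch, assuming a THREAD_GROUP_SHARE_L2 constant and a thread_group_l2_cache_map per-cpu mask — both names are assumptions based on the commit message, introduced only by the next patch, not this one:

	/*
	 * Hypothetical sketch (not part of this patch): how
	 * init_thread_group_cache_map() could pick the per-cpu mask
	 * once L2 thread-group discovery lands. The L2 identifiers
	 * below are assumed names.
	 */
	if (cache_property == THREAD_GROUP_SHARE_L1)
		mask = &per_cpu(thread_group_l1_cache_map, cpu);
	else if (cache_property == THREAD_GROUP_SHARE_L2)
		mask = &per_cpu(thread_group_l2_cache_map, cpu);
	else
		return -EINVAL;

	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));

With the mask chosen up front, the rest of the function (group-start comparison and cpumask_set_cpu(i, *mask)) stays identical for every cache property, which is the point of this rename.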