Commit 30fc4ca2 authored by Heiko Carstens, committed by Martin Schwidefsky

s390/topology: use cpu_topology array instead of per cpu variable

CPU topology information, such as the cpu to node mapping, must be set up
in setup_arch() already. Topology information is currently made available
via a per cpu variable; this, however, will not work once the
initialization is moved to setup_arch(), since the generic percpu setup
is done much later.

Therefore convert back to a cpu_topology array.
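
To make the ordering constraint concrete, here is a small compilable
userspace model (illustrative only; the names mirror the patch, but
nothing below is kernel API): storage carved out by a late init step,
like the generic percpu area, cannot be touched from early setup code,
while a statically sized array is valid from the start.

/* Illustrative userspace model, not kernel code. */
#include <assert.h>
#include <stdlib.h>

#define NR_CPUS 4

struct cpu_topology_s390 { int node_id; };

/* Models the generic percpu area: allocated only by a late init step. */
static struct cpu_topology_s390 *percpu_area;

/* Models the patch's approach: static storage, valid from the start. */
static struct cpu_topology_s390 cpu_topology[NR_CPUS];

static void early_setup_arch(void)
{
	/* percpu_area[0].node_id = 0; would crash: not allocated yet. */
	cpu_topology[0].node_id = 0;	/* fine: static storage */
}

static void late_percpu_setup(void)
{
	percpu_area = calloc(NR_CPUS, sizeof(*percpu_area));
}

int main(void)
{
	early_setup_arch();	/* runs first, like setup_arch() */
	late_percpu_setup();	/* runs much later */
	assert(cpu_topology[0].node_id == 0);
	free(percpu_area);
	return 0;
}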
Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent af51160e
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,18 +22,17 @@ struct cpu_topology_s390 {
 	cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-		(&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
@@ -65,7 +64,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-	return per_cpu(cpu_topology, cpu).node_id;
+	return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -41,15 +41,15 @@ static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
 /*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
  * protected by "sched_domains_mutex".
  */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 static struct mask_info drawer_info;
 
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -97,7 +97,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			topo = &per_cpu(cpu_topology, lcpu + i);
+			topo = &cpu_topology[lcpu + i];
 			topo->drawer_id = drawer->id;
 			topo->book_id = book->id;
 			topo->socket_id = socket->id;
@@ -220,7 +220,7 @@ static void update_cpu_masks(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		topo = &per_cpu(cpu_topology, cpu);
+		topo = &cpu_topology[cpu];
 		topo->thread_mask = cpu_thread_map(cpu);
 		topo->core_mask = cpu_group_map(&socket_info, cpu);
 		topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -394,23 +394,23 @@ int topology_cpu_init(struct cpu *cpu)
 
 static const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).thread_mask;
+	return &cpu_topology[cpu].thread_mask;
 }
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).core_mask;
+	return &cpu_topology[cpu].core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).book_mask;
+	return &cpu_topology[cpu].book_mask;
 }
 
 static const struct cpumask *cpu_drawer_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).drawer_mask;
+	return &cpu_topology[cpu].drawer_mask;
 }
 
 static int __init early_parse_topology(char *p)
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -355,7 +355,7 @@ static struct toptree *toptree_from_topology(void)
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
 	for_each_online_cpu(cpu) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
 		book = toptree_get_child(drawer, top->book_id);
@@ -378,7 +378,7 @@ static void topology_add_core(struct toptree *core)
 	int cpu;
 
 	for_each_cpu(cpu, &core->mask) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
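
One consequence worth noting (the caller below is hypothetical, not part
of the patch): since the topology_* macros and cpu_to_node() now expand
to plain array accesses, they are safe to use before the percpu areas
exist, e.g. from code running during setup_arch().

/* Hypothetical early caller: safe now that cpu_topology is a
 * statically allocated array rather than a per cpu variable.
 */
static int __init boot_cpu_node(void)
{
	return cpu_to_node(0);	/* expands to cpu_topology[0].node_id */
}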