Commit 22be9cd9 authored by Martin Schwidefsky

s390/numa: use correct type for node_to_cpumask_map

With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer to a CPU mask
rather than the mask itself, so an array of cpumask_var_t holds only
unallocated pointers. Replace the incorrect element type of
node_to_cpumask_map with cpumask_t and take the address of the array
elements where a struct cpumask pointer is needed.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent bcee19f4
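
To illustrate the type distinction behind the fix, here is a minimal user-space
sketch; the mask_t, mask_var_t and node_map names are illustrative stand-ins,
not the kernel definitions. With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t
behaves like a pointer that must be allocated separately, while cpumask_t
embeds the bitmap, so an array element only needs its address taken:

/* Stand-in types -- not the kernel's definitions. */
#include <stdio.h>

typedef struct { unsigned long bits[4]; } mask_t;   /* like cpumask_t: storage embedded        */
typedef mask_t *mask_var_t;                         /* like cpumask_var_t under OFFSTACK=y     */

static mask_t node_map[4];          /* new declaration style: masks exist, zero-initialized    */
static mask_var_t node_map_old[4];  /* old declaration style: four unallocated NULL pointers   */

int main(void)
{
	/* With embedded storage, passing &node_map[n] works immediately,
	 * which is what the &node_to_cpumask_map[...] changes below rely on. */
	node_map[0].bits[0] |= 1UL;

	/* A pointer-typed array would need a per-element allocation first;
	 * using an element directly, as the old code did, dereferences NULL. */
	printf("old-style element is %s\n", node_map_old[0] ? "allocated" : "NULL");
	return 0;
}

Compiled as plain C, this prints that the old-style element is NULL, which is
why the kernel array needs cpumask_t rather than cpumask_var_t.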
--- a/arch/s390/include/asm/numa.h
+++ b/arch/s390/include/asm/numa.h
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 
 #else
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 {
-	return node_to_cpumask_map[node];
+	return &node_to_cpumask_map[node];
 }
 
 /*
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
-		cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+		cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
 		top->node_id = core_node(core)->id;
 	}
 }
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
 
 	/* Clear all node masks */
 	for (i = 0; i < MAX_NUMNODES; i++)
-		cpumask_clear(node_to_cpumask_map[i]);
+		cpumask_clear(&node_to_cpumask_map[i]);
 
 	/* Rebuild all masks */
 	toptree_for_each(core, numa, CORE)
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
 static int __init numa_init_early(void)
 {
 	/* Attach all possible CPUs to node 0 for now. */
-	cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
 	return 0;
 }
 early_initcall(numa_init_early);