Commit 3be7db6a authored by Robert Jennings, committed by Benjamin Herrenschmidt

powerpc: VPHN topology change updates all siblings

When an associativity level change is found for one thread, the
sibling threads need to be updated as well.  This is done today
for PRRN in stage_topology_update() but is missing for VPHN in
update_cpu_associativity_changes_mask().  This patch correctly
updates all thread siblings during a topology change.
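
As a sketch of the fix (the full hunk in
update_cpu_associativity_changes_mask() appears in the diff below),
the single-thread flag becomes a whole-sibling-mask flag:

	/* Before: only the reporting thread was flagged. */
	if (changed)
		cpumask_set_cpu(cpu, changes);

	/* After: flag every hardware thread on the core, then advance
	 * the loop iterator past the siblings just handled.
	 */
	if (changed) {
		cpumask_or(changes, changes, cpu_sibling_mask(cpu));
		cpu = cpu_last_thread_sibling(cpu);
	}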

Without this patch, a topology update can leave a CPU stuck
indefinitely in a loop inside init_sched_groups_power().

This loop is built in build_sched_groups().  When a thread moves to a
node separate from its siblings, the struct sched_group has its next
pointer set to point to itself rather than to the sched_group of the
next thread.  This happens because we have a domain without the
SD_OVERLAP flag, which is correct, combined with a topology that does
not conform to reality (threads on the same core assigned to different
NUMA nodes).  When init_sched_groups_power() traverses this list it
reaches that thread's sched_group structure and loops indefinitely;
the CPU is stuck at this point.
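
The failure mode can be illustrated with a toy circular list
(simplified types, not the scheduler's actual structures): the
sched_group ring is walked with a do/while that terminates only when
the walk returns to the first group, so a group whose next pointer
refers to itself traps the walk forever:

	#include <stdio.h>

	struct group {			/* toy stand-in for struct sched_group */
		int id;
		struct group *next;
	};

	int main(void)
	{
		struct group a = { 0, 0 }, b = { 1, 0 }, c = { 2, 0 };
		int steps = 0;

		/* Healthy ring: a -> b -> c -> a, traversal terminates. */
		a.next = &b; b.next = &c; c.next = &a;

		/* Broken topology: b points back at itself, so the walk
		 * can never return to the first group.  A step bound is
		 * added here so the demo halts; the kernel loop has none.
		 */
		b.next = &b;

		struct group *sg = &a;
		do {
			printf("visiting group %d\n", sg->id);
			sg = sg->next;
		} while (sg != &a && ++steps < 10);

		if (steps >= 10)
			printf("stuck: never returned to the first group\n");
		return 0;
	}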

The bug was exposed when VPHN was enabled in commit b7abef04 (v3.9).

Cc: <stable@vger.kernel.org> [v3.9+]
Reported-by: Jan Stancek <jstancek@redhat.com>
Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 8d7c55d0
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
 #define smp_setup_cpu_maps()
 static inline void inhibit_secondary_onlining(void) {}
 static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
 
 #endif /* CONFIG_SMP */
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <asm/cputhreads.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
 			}
 		}
 		if (changed) {
-			cpumask_set_cpu(cpu, changes);
+			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
 		}
 	}
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
 	if (!data)
 		return -EINVAL;
 
-	cpu = get_cpu();
+	cpu = smp_processor_id();
 
 	for (update = data; update; update = update->next) {
 		if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
  */
 int arch_update_cpu_topology(void)
 {
-	unsigned int cpu, changed = 0;
+	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	cpumask_t updated_cpus;
 	struct device *dev;
-	int weight, i = 0;
+	int weight, new_nid, i = 0;
 
 	weight = cpumask_weight(&cpu_associativity_changes_mask);
 	if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
 	cpumask_clear(&updated_cpus);
 
 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
-		ud = &updates[i++];
-		ud->cpu = cpu;
-		vphn_get_associativity(cpu, associativity);
-		ud->new_nid = associativity_to_nid(associativity);
+		/*
+		 * If siblings aren't flagged for changes, updates list
+		 * will be too short. Skip on this update and set for next
+		 * update.
+		 */
+		if (!cpumask_subset(cpu_sibling_mask(cpu),
+					&cpu_associativity_changes_mask)) {
+			pr_info("Sibling bits not set for associativity "
+					"change, cpu%d\n", cpu);
+			cpumask_or(&cpu_associativity_changes_mask,
+					&cpu_associativity_changes_mask,
+					cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
+			continue;
+		}
 
-		if (ud->new_nid < 0 || !node_online(ud->new_nid))
-			ud->new_nid = first_online_node;
+		/* Use associativity from first thread for all siblings */
+		vphn_get_associativity(cpu, associativity);
+		new_nid = associativity_to_nid(associativity);
 
-		ud->old_nid = numa_cpu_lookup_table[cpu];
-		cpumask_set_cpu(cpu, &updated_cpus);
+		if (new_nid < 0 || !node_online(new_nid))
+			new_nid = first_online_node;
+
+		if (new_nid == numa_cpu_lookup_table[cpu]) {
+			cpumask_andnot(&cpu_associativity_changes_mask,
+					&cpu_associativity_changes_mask,
+					cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
+			continue;
+		}
 
-		if (i < weight)
-			ud->next = &updates[i];
+		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+			ud = &updates[i++];
+			ud->cpu = sibling;
+			ud->new_nid = new_nid;
+			ud->old_nid = numa_cpu_lookup_table[sibling];
+			cpumask_set_cpu(sibling, &updated_cpus);
+			if (i < weight)
+				ud->next = &updates[i];
+		}
+		cpu = cpu_last_thread_sibling(cpu);
 	}
 
 	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
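
A note on the cpumask_subset() guard added above: the updates array is
sized from the weight of cpu_associativity_changes_mask, and the inner
for_each_cpu() over the sibling mask consumes one entry per sibling,
so every sibling must already be flagged or the array would overflow.
A sketch of the invariant (the kzalloc sizing follows the function's
existing allocation):

	/* One slot per flagged CPU ... */
	weight = cpumask_weight(&cpu_associativity_changes_mask);
	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);

	/* ... so a core whose siblings are not all flagged is skipped
	 * for now, all of its siblings are flagged, and the next pass
	 * allocates enough slots for the whole core.
	 */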