Commit 7f257c1b authored by Nick Piggin, committed by Linus Torvalds

[PATCH] debug sched domains before attach

Change the sched-domain debug routine so that it is called per-CPU and runs
before the domain is actually attached to that CPU.  Previously, all CPUs
first had their new domains attached, and only then did the debug routine
loop over all of them.

This has two advantages.  First, there is no longer any theoretical race: the
debug routine runs on a domain that is not yet active, so no other CPU can be
accessing it concurrently.  Second, if there is a problem with a domain, the
validator has a better chance of catching the error and printing a diagnostic
_before_ the faulty domain is attached and potentially takes the system down.
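
As an illustration, the new attach path boils down to the following ordering
(a simplified sketch of the cpu_attach_domain() hunk below; the locking and
the migration-thread handoff that actually publish rq->sd are elided):

	void cpu_attach_domain(struct sched_domain *sd, int cpu)
	{
		/* Validate 'sd' while no other CPU can observe it yet... */
		sched_domain_debug(sd, cpu);

		/* ...and only then publish it as the runqueue's base domain. */
		cpu_rq(cpu)->sd = sd;
	}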

Also, change reporting of detected error conditions from KERN_DEBUG to
KERN_ERR, so they have a better chance of being seen if the system hangs
during boot.
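
For reference, a printk log level is just a prefix on the format string; with
the default console loglevel, KERN_DEBUG (level 7) messages are normally
filtered from the console while KERN_ERR (level 3) messages are printed.  For
example, with two of the messages from the new routine:

	/* Usually invisible on the console with default settings: */
	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	/* Printed on the console even at the default loglevel: */
	printk(KERN_ERR "ERROR: empty group\n");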

The patch also makes an unrelated (and harmless) cleanup in
migration_thread(): __migrate_task() is passed the already-known cpu instead
of re-reading smp_processor_id().
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c919160e
@@ -3909,8 +3909,7 @@ static int migration_thread(void * data)
 		if (req->type == REQ_MOVE_TASK) {
 			spin_unlock(&rq->lock);
-			__migrate_task(req->task, smp_processor_id(),
-					req->dest_cpu);
+			__migrate_task(req->task, cpu, req->dest_cpu);
 			local_irq_enable();
 		} else if (req->type == REQ_SET_DOMAIN) {
 			rq->sd = req->sd;
@@ -4180,6 +4179,94 @@ int __init migration_init(void)
 #endif
 
 #ifdef CONFIG_SMP
+#define SCHED_DOMAIN_DEBUG
+#ifdef SCHED_DOMAIN_DEBUG
+static void sched_domain_debug(struct sched_domain *sd, int cpu)
+{
+	int level = 0;
+
+	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+
+	do {
+		int i;
+		char str[NR_CPUS];
+		struct sched_group *group = sd->groups;
+		cpumask_t groupmask;
+
+		cpumask_scnprintf(str, NR_CPUS, sd->span);
+		cpus_clear(groupmask);
+
+		printk(KERN_DEBUG);
+		for (i = 0; i < level + 1; i++)
+			printk(" ");
+		printk("domain %d: ", level);
+
+		if (!(sd->flags & SD_LOAD_BALANCE)) {
+			printk("does not load-balance\n");
+			if (sd->parent)
+				printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
+			break;
+		}
+
+		printk("span %s\n", str);
+
+		if (!cpu_isset(cpu, sd->span))
+			printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
+		if (!cpu_isset(cpu, group->cpumask))
+			printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
+
+		printk(KERN_DEBUG);
+		for (i = 0; i < level + 2; i++)
+			printk(" ");
+		printk("groups:");
+		do {
+			if (!group) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: group is NULL\n");
+				break;
+			}
+
+			if (!group->cpu_power) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
+			}
+
+			if (!cpus_weight(group->cpumask)) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: empty group\n");
+			}
+
+			if (cpus_intersects(groupmask, group->cpumask)) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: repeated CPUs\n");
+			}
+
+			cpus_or(groupmask, groupmask, group->cpumask);
+
+			cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+			printk(" %s", str);
+
+			group = group->next;
+		} while (group != sd->groups);
+		printk("\n");
+
+		if (!cpus_equal(sd->span, groupmask))
+			printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+
+		level++;
+		sd = sd->parent;
+
+		if (sd) {
+			if (!cpus_subset(groupmask, sd->span))
+				printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
+		}
+
+	} while (sd);
+}
+#else
+#define sched_domain_debug(sd, cpu) {}
+#endif
 
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
  * hold the hotplug lock.
@@ -4191,6 +4278,8 @@ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
 	runqueue_t *rq = cpu_rq(cpu);
 	int local = 1;
 
+	sched_domain_debug(sd, cpu);
+
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (cpu == smp_processor_id() || !cpu_online(cpu)) {
@@ -4466,96 +4555,6 @@ static void __devinit arch_destroy_sched_domains(void)
 #endif /* ARCH_HAS_SCHED_DOMAIN */
 
-#define SCHED_DOMAIN_DEBUG
-#ifdef SCHED_DOMAIN_DEBUG
-static void sched_domain_debug(void)
-{
-	int i;
-
-	for_each_online_cpu(i) {
-		runqueue_t *rq = cpu_rq(i);
-		struct sched_domain *sd;
-		int level = 0;
-
-		sd = rq->sd;
-
-		printk(KERN_DEBUG "CPU%d:\n", i);
-
-		do {
-			int j;
-			char str[NR_CPUS];
-			struct sched_group *group = sd->groups;
-			cpumask_t groupmask;
-
-			cpumask_scnprintf(str, NR_CPUS, sd->span);
-			cpus_clear(groupmask);
-
-			printk(KERN_DEBUG);
-			for (j = 0; j < level + 1; j++)
-				printk(" ");
-			printk("domain %d: ", level);
-
-			if (!(sd->flags & SD_LOAD_BALANCE)) {
-				printk("does not load-balance");
-				if (sd->parent)
-					printk(" ERROR !SD_LOAD_BALANCE domain has parent");
-				printk("\n");
-				break;
-			}
-
-			printk("span %s\n", str);
-
-			if (!cpu_isset(i, sd->span))
-				printk(KERN_DEBUG "ERROR domain->span does not contain CPU%d\n", i);
-			if (!cpu_isset(i, group->cpumask))
-				printk(KERN_DEBUG "ERROR domain->groups does not contain CPU%d\n", i);
-
-			printk(KERN_DEBUG);
-			for (j = 0; j < level + 2; j++)
-				printk(" ");
-			printk("groups:");
-			do {
-				if (!group) {
-					printk(" ERROR: NULL");
-					break;
-				}
-
-				if (!group->cpu_power)
-					printk(KERN_DEBUG "ERROR group->cpu_power not set\n");
-
-				if (!cpus_weight(group->cpumask))
-					printk(" ERROR empty group:");
-
-				if (cpus_intersects(groupmask, group->cpumask))
-					printk(" ERROR repeated CPUs:");
-
-				cpus_or(groupmask, groupmask, group->cpumask);
-
-				cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-				printk(" %s", str);
-
-				group = group->next;
-			} while (group != sd->groups);
-			printk("\n");
-
-			if (!cpus_equal(sd->span, groupmask))
-				printk(KERN_DEBUG "ERROR groups don't span domain->span\n");
-
-			level++;
-			sd = sd->parent;
-
-			if (sd) {
-				if (!cpus_subset(groupmask, sd->span))
-					printk(KERN_DEBUG "ERROR parent span is not a superset of domain->span\n");
-			}
-
-		} while (sd);
-	}
-}
-#else
-#define sched_domain_debug() {}
-#endif
 
 /*
  * Initial dummy domain for early boot and for hotplug cpu. Being static,
  * it is initialized to zero, so all balancing flags are cleared which is
@@ -4598,8 +4597,6 @@ static int update_sched_domains(struct notifier_block *nfb,
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains();
 
-	sched_domain_debug();
-
 	return NOTIFY_OK;
 }
 #endif
@@ -4608,7 +4605,6 @@ void __init sched_init_smp(void)
 {
 	lock_cpu_hotplug();
 	arch_init_sched_domains();
-	sched_domain_debug();
 	unlock_cpu_hotplug();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);