Commit 70a0b8e7 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] sched: cleanup init_idle()

Clean up init_idle to not use wake_up_forked_process, then undo all the stuff
that call does.  Instead, do everything in init_idle.

Make double_rq_lock depend on CONFIG_SMP because it is no longer used on UP.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b2a0e913
......@@ -439,8 +439,6 @@ smp_boot_one_cpu(int cpuid)
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpuid);
wake_up_forked_process(idle);
init_idle(idle, cpuid);
unhash_process(idle);
......
......@@ -804,16 +804,13 @@ static int __init do_boot_cpu(int apicid)
idle = fork_by_hand();
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
wake_up_forked_process(idle);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
/* Make this the idle thread */
init_idle(idle, cpu);
idle->thread.eip = (unsigned long) start_secondary;
/* Remove it from the pidhash */
unhash_process(idle);
/* start_eip had better be page-aligned! */
......
......@@ -591,11 +591,12 @@ do_boot_cpu(__u8 cpu)
if(IS_ERR(idle))
panic("failed fork for CPU%d", cpu);
wake_up_forked_process(idle);
/* Make this the idle thread */
init_idle(idle, cpu);
idle->thread.eip = (unsigned long) start_secondary;
/* Remove it from the pidhash */
unhash_process(idle);
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
......
......@@ -400,14 +400,11 @@ do_boot_cpu (int sapicid, int cpu)
if (IS_ERR(c_idle.idle))
panic("failed fork for CPU %d", cpu);
wake_up_forked_process(c_idle.idle);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
/* Make this the idle thread */
init_idle(c_idle.idle, cpu);
/* Remove it from the pidhash */
unhash_process(c_idle.idle);
task_for_booting_cpu = c_idle.idle;
......
......@@ -279,14 +279,10 @@ static int __init do_boot_cpu(int cpu)
if (IS_ERR(idle))
panic("failed fork for CPU %d\n", cpu);
wake_up_forked_process(idle);
/*
* We remove it from the pidhash and the runqueue once we've
* got the process:
*/
/* Make this the idle thread */
init_idle(idle, cpu);
/* Remove it from the pidhash */
unhash_process(idle);
prom_boot_secondary(cpu, idle);
......
......@@ -525,7 +525,6 @@ int __init smp_boot_one_cpu(int cpuid)
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
wake_up_forked_process(idle);
init_idle(idle, cpuid);
unhash_process(idle);
idle->thread_info->cpu = cpuid;
......
......@@ -375,8 +375,6 @@ int __cpu_up(unsigned int cpu)
p = copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
wake_up_forked_process(p);
init_idle(p, cpu);
unhash_process(p);
......
......@@ -811,7 +811,6 @@ static void __init smp_create_idle(unsigned int cpu)
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
wake_up_forked_process(p);
init_idle(p, cpu);
unhash_process(p);
......
......@@ -574,9 +574,12 @@ static void __init smp_create_idle(unsigned int cpu)
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
wake_up_forked_process(p);
/* Make this the idle thread */
init_idle(p, cpu);
/* Remove it from the pidhash */
unhash_process(p);
current_set[cpu] = p;
}
......
......@@ -106,8 +106,6 @@ int __cpu_up(unsigned int cpu)
if (IS_ERR(tsk))
panic("Failed forking idle task for cpu %d\n", cpu);
wake_up_forked_process(tsk);
init_idle(tsk, cpu);
unhash_process(tsk);
......
......@@ -578,15 +578,12 @@ static void __init do_boot_cpu (int apicid)
idle = fork_by_hand();
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
wake_up_forked_process(idle);
x86_cpu_to_apicid[cpu] = apicid;
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
/* Make this the idle thread */
init_idle(idle,cpu);
/* Remove it from the pidhash */
unhash_process(idle);
cpu_pda[cpu].pcurrent = idle;
......
......@@ -472,6 +472,14 @@ asmlinkage void __init start_kernel(void)
*/
sched_init();
/*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread, however somewhere below it might be,
* but because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
build_all_zonelists();
page_alloc_init();
printk("Kernel command line: %s\n", saved_command_line);
......@@ -538,13 +546,6 @@ asmlinkage void __init start_kernel(void)
acpi_early_init(); /* before LAPIC and SMP init */
/*
* We count on the initial thread going ok
* Like idlers init is an unlocked kernel thread, which will
* make syscalls (and thus be locked).
*/
init_idle(current, smp_processor_id());
/* Do the rest non-__init'ed, we're now alive */
rest_init();
}
......
......@@ -1121,6 +1121,15 @@ unsigned long nr_iowait(void)
return sum;
}
enum idle_type
{
IDLE,
NOT_IDLE,
NEWLY_IDLE,
};
#ifdef CONFIG_SMP
/*
* double_rq_lock - safely lock two runqueues
*
......@@ -1155,14 +1164,20 @@ static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
spin_unlock(&rq2->lock);
}
enum idle_type
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
{
IDLE,
NOT_IDLE,
NEWLY_IDLE,
};
#ifdef CONFIG_SMP
if (unlikely(!spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
} else
spin_lock(&busiest->lock);
}
}
/*
* find_idlest_cpu - find the least busy runqueue.
......@@ -1357,21 +1372,6 @@ void sched_balance_exec(void)
put_cpu();
}
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
{
if (unlikely(!spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
} else
spin_lock(&busiest->lock);
}
}
/*
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
......@@ -2209,6 +2209,15 @@ asmlinkage void __sched schedule(void)
prev = current;
rq = this_rq();
/*
* The idle thread is not allowed to schedule!
* Remove this check after it has been exercised a bit.
*/
if (unlikely(current == rq->idle) && current->state != TASK_RUNNING) {
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
}
release_kernel_lock(prev);
now = sched_clock();
if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
......@@ -3268,21 +3277,20 @@ void show_state(void)
void __devinit init_idle(task_t *idle, int cpu)
{
runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
runqueue_t *rq = cpu_rq(cpu);
unsigned long flags;
local_irq_save(flags);
double_rq_lock(idle_rq, rq);
idle_rq->curr = idle_rq->idle = idle;
deactivate_task(idle, rq);
idle->sleep_avg = 0;
idle->interactive_credit = 0;
idle->array = NULL;
idle->prio = MAX_PRIO;
idle->state = TASK_RUNNING;
set_task_cpu(idle, cpu);
double_rq_unlock(idle_rq, rq);
spin_lock_irqsave(&rq->lock, flags);
rq->curr = rq->idle = idle;
set_tsk_need_resched(idle);
local_irq_restore(flags);
spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#ifdef CONFIG_PREEMPT
......@@ -3959,15 +3967,6 @@ void __init sched_init(void)
__set_bit(MAX_PRIO, array->bitmap);
}
}
/*
* We have to do a little magic to get the first
* thread right in SMP mode.
*/
rq = this_rq();
rq->curr = current;
rq->idle = current;
set_task_cpu(current, smp_processor_id());
wake_up_forked_process(current);
/*
* The boot idle thread does lazy MMU switching as well:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment