Commit 044996c2 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/sched-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents eb86b60f 97cab050
@@ -140,6 +140,7 @@ struct prio_array {
  */
 struct runqueue {
 	spinlock_t lock;
+	spinlock_t frozen;
 	unsigned long nr_running, nr_switches, expired_timestamp;
 	task_t *curr, *idle;
 	prio_array_t *active, *expired, arrays[2];
@@ -400,7 +401,7 @@ void sched_exit(task_t * p)
 #if CONFIG_SMP || CONFIG_PREEMPT
 asmlinkage void schedule_tail(void)
 {
-	spin_unlock_irq(&this_rq()->lock);
+	spin_unlock_irq(&this_rq()->frozen);
 }
 #endif
@@ -518,12 +519,14 @@ static void load_balance(runqueue_t *this_rq, int idle)
 	busiest = NULL;
 	max_load = 1;
 	for (i = 0; i < smp_num_cpus; i++) {
-		rq_src = cpu_rq(cpu_logical_map(i));
-		if (idle || (rq_src->nr_running < this_rq->prev_nr_running[i]))
+		int logical = cpu_logical_map(i);
+
+		rq_src = cpu_rq(logical);
+		if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical]))
 			load = rq_src->nr_running;
 		else
-			load = this_rq->prev_nr_running[i];
-		this_rq->prev_nr_running[i] = rq_src->nr_running;
+			load = this_rq->prev_nr_running[logical];
+		this_rq->prev_nr_running[logical] = rq_src->nr_running;
 
 		if ((load > max_load) && (rq_src != this_rq)) {
 			busiest = rq_src;
@@ -590,7 +593,7 @@ static void load_balance(runqueue_t *this_rq, int idle)
 #define CAN_MIGRATE_TASK(p,rq,this_cpu)	\
 	((jiffies - (p)->sleep_timestamp > cache_decay_ticks) &&	\
 		((p) != (rq)->curr) &&	\
-			(tmp->cpus_allowed & (1 << (this_cpu))))
+			((p)->cpus_allowed & (1 << (this_cpu))))
 
 	if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
 		curr = curr->next;
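Note on the CAN_MIGRATE_TASK fix: the macro body referenced tmp, a variable from the caller's scope, instead of its parameter p, so it only evaluated the right task because the sole call site happened to pass tmp. Writing (p) makes the macro depend only on its arguments. A standalone sketch of the failure mode (the struct and macro names here are invented for the demo):

#include <stdio.h>

/* Buggy: the body captures `tmp` from the call site and ignores its
 * own parameter p entirely. */
#define MASK_ALLOWS_BUGGY(p, cpu)  (tmp->cpus_allowed & (1 << (cpu)))
/* Fixed: every use of the parameter is written as (p). */
#define MASK_ALLOWS_FIXED(p, cpu)  ((p)->cpus_allowed & (1 << (cpu)))

struct task { unsigned long cpus_allowed; };

int main(void)
{
	struct task a = { .cpus_allowed = 0x1 };  /* CPU 0 only */
	struct task b = { .cpus_allowed = 0x2 };  /* CPU 1 only */
	struct task *tmp = &a;

	/* The buggy macro silently tests tmp (task a), not task b: */
	printf("buggy (b, cpu1): %d\n", !!MASK_ALLOWS_BUGGY(&b, 1)); /* 0: wrong  */
	printf("fixed (b, cpu1): %d\n", !!MASK_ALLOWS_FIXED(&b, 1)); /* 1: right */
	return 0;
}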
@@ -808,16 +811,22 @@ asmlinkage void schedule(void)
 	if (likely(prev != next)) {
 		rq->nr_switches++;
 		rq->curr = next;
+		spin_lock(&rq->frozen);
+		spin_unlock(&rq->lock);
+
 		context_switch(prev, next);
+
 		/*
 		 * The runqueue pointer might be from another CPU
 		 * if the new task was last running on a different
 		 * CPU - thus re-load it.
 		 */
-		barrier();
+		mb();
 		rq = this_rq();
-	}
-	spin_unlock_irq(&rq->lock);
+		spin_unlock_irq(&rq->frozen);
+	} else {
+		spin_unlock_irq(&rq->lock);
+	}
 
 	reacquire_kernel_lock(current);
 	preempt_enable_no_resched();
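Note on the schedule()/schedule_tail() hunks: instead of holding rq->lock across the whole context switch, the outgoing task now takes rq->frozen, drops rq->lock, and switches; whichever code resumes on the other side releases frozen (schedule_tail() for a freshly forked task, the code after context_switch() for an ordinary one). The barrier() also becomes mb(), since the task may resume on a different CPU and the reload of rq needs a real hardware memory barrier, not just a compiler one. Below is a user-space sketch of the handoff, assuming POSIX semaphores stand in for the spinlocks (a pthread mutex may not legally be unlocked by a non-owner, while frozen is locked by one task and unlocked by another). Build with: cc handoff.c -pthread

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t rq_lock, rq_frozen;  /* value 1 = unlocked */
static sem_t switched;            /* models context_switch() */

static void *incoming_task(void *unused)
{
	(void)unused;
	sem_wait(&switched);      /* "switched to" by the outgoing task */
	/* schedule_tail(): the new task's first act is releasing the
	 * handoff lock taken on its behalf before the switch. */
	sem_post(&rq_frozen);
	printf("incoming: released frozen\n");
	return NULL;
}

int main(void)
{
	pthread_t next;

	sem_init(&rq_lock, 0, 1);
	sem_init(&rq_frozen, 0, 1);
	sem_init(&switched, 0, 0);
	pthread_create(&next, NULL, incoming_task, NULL);

	/* schedule(): pick the next task under rq_lock, then hand off. */
	sem_wait(&rq_lock);       /* lock the runqueue                */
	sem_wait(&rq_frozen);     /* take the handoff lock ...        */
	sem_post(&rq_lock);       /* ... and drop the main lock early */
	sem_post(&switched);      /* context_switch(prev, next)       */

	/* Anyone who needs the switch to be complete blocks on frozen: */
	sem_wait(&rq_frozen);
	printf("outgoing side: switch finished, frozen free again\n");
	sem_post(&rq_frozen);

	pthread_join(next, NULL);
	return 0;
}

The point of the handoff is lock hold time: rq->lock is contended by every CPU doing wakeups and load balancing, so releasing it before the (comparatively slow) context switch shortens the critical section, while frozen still serializes the switch itself.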
@@ -1463,6 +1472,7 @@ void __init sched_init(void)
 		rq->active = rq->arrays;
 		rq->expired = rq->arrays + 1;
 		spin_lock_init(&rq->lock);
+		spin_lock_init(&rq->frozen);
 		INIT_LIST_HEAD(&rq->migration_queue);
 
 		for (j = 0; j < 2; j++) {
@@ -1649,19 +1659,31 @@ static int migration_thread(void * unused)
 void __init migration_init(void)
 {
+	unsigned long tmp, orig_cache_decay_ticks;
 	int cpu;
 
-	for (cpu = 0; cpu < smp_num_cpus; cpu++)
+	tmp = 0;
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
 		if (kernel_thread(migration_thread, NULL,
 				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
 			BUG();
+		tmp |= (1UL << cpu_logical_map(cpu));
+	}
 
-	migration_mask = (1 << smp_num_cpus) - 1;
+	migration_mask = tmp;
 
+	orig_cache_decay_ticks = cache_decay_ticks;
+	cache_decay_ticks = 0;
+
-	for (cpu = 0; cpu < smp_num_cpus; cpu++)
-		while (!cpu_rq(cpu)->migration_thread)
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		int logical = cpu_logical_map(cpu);
+
+		while (!cpu_rq(logical)->migration_thread)
 			schedule_timeout(2);
+	}
 
 	if (migration_mask)
 		BUG();
+
+	cache_decay_ticks = orig_cache_decay_ticks;
 }
 #endif
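Note on the migration_init() hunk: migration_mask is now built by OR-ing one bit per logical CPU number rather than assuming the dense mask (1 << smp_num_cpus) - 1, which is wrong when cpu_logical_map() is sparse or not the identity; the wait loop is likewise keyed on the logical number. cache_decay_ticks is temporarily forced to 0, plausibly so that CAN_MIGRATE_TASK's cache-decay test cannot delay moving the freshly forked migration threads onto their home CPUs, and is restored afterwards. A small sketch of the mask construction under an invented sparse map:

#include <stdio.h>

static int smp_num_cpus = 3;

static int cpu_logical_map(int i)
{
	static const int map[] = { 0, 2, 5 };  /* sparse numbering */
	return map[i];
}

int main(void)
{
	unsigned long tmp = 0;
	int cpu;

	for (cpu = 0; cpu < smp_num_cpus; cpu++)
		tmp |= (1UL << cpu_logical_map(cpu));

	/* The old code computed (1 << smp_num_cpus) - 1 = 0x7, naming
	 * CPUs 0-2; with this sparse map the correct mask is 0x25. */
	printf("migration_mask = %#lx\n", tmp);
	return 0;
}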