Commit 2f16618a authored by Andrew Morton, committed by Linus Torvalds

[PATCH] sched: minor cleanups

From: Nick Piggin <nickpiggin@yahoo.com.au>

Minor cleanups from Ingo's patch including task_hot (do it right in
try_to_wake_up too).
parent 80b19256
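The cleanup centres on the new task_hot() helper: the open-coded cache-hot test (now - p->timestamp < sd->cache_hot_time) in try_to_wake_up() and can_migrate_task() is replaced by a single macro, and try_to_wake_up() now measures against rq->timestamp_last_tick instead of taking a fresh sched_clock() reading. A minimal, self-contained sketch of how that check reads, using stand-in types rather than the kernel's task_t and struct sched_domain, might look like this:

/*
 * Simplified sketch of the task_hot() check added by this patch.
 * The structs below are stand-ins for illustration only; the real
 * task_t and struct sched_domain are defined in the kernel headers.
 */
#include <stdio.h>

struct task { unsigned long long timestamp; };              /* stand-in for task_t */
struct sched_domain { unsigned long long cache_hot_time; }; /* stand-in */

/* The macro introduced by the patch: a task is "cache hot" if it ran
 * recently enough that its working set is likely still in the CPU cache. */
#define task_hot(p, now, sd) ((now) - (p)->timestamp < (sd)->cache_hot_time)

int main(void)
{
	struct task p = { .timestamp = 1000 };
	struct sched_domain sd = { .cache_hot_time = 2500 };
	unsigned long long now = 2000;

	/* Previously open-coded in both try_to_wake_up() and
	 * can_migrate_task() as:
	 *   if (now - p->timestamp < sd->cache_hot_time) ...
	 */
	printf("task_hot: %d\n", task_hot(&p, now, &sd));
	return 0;
}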
@@ -597,9 +597,9 @@ struct sched_domain {
 	.cache_nice_tries	= 0,			\
 	.per_cpu_gain		= 15,			\
 	.flags			= SD_BALANCE_NEWIDLE	\
				| SD_WAKE_AFFINE	\
				| SD_WAKE_IDLE		\
				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
@@ -187,6 +187,8 @@ static inline unsigned int task_timeslice(task_t *p)
 	return BASE_TIMESLICE(p);
 }
 
+#define task_hot(p, now, sd) ((now) - (p)->timestamp < (sd)->cache_hot_time)
+
 /*
  * These are the runqueue data structures:
  */
@@ -704,13 +706,11 @@ static inline int wake_idle(int cpu, task_t *p)
  */
 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 {
+	int cpu, this_cpu, success = 0;
 	unsigned long flags;
-	int success = 0;
 	long old_state;
 	runqueue_t *rq;
-	int cpu, this_cpu;
 #ifdef CONFIG_SMP
-	unsigned long long now;
 	unsigned long load, this_load;
 	struct sched_domain *sd;
 	int new_cpu;
@@ -753,8 +753,6 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 	if (load > this_load + SCHED_LOAD_SCALE*2)
 		goto out_set_cpu;
 
-	now = sched_clock();
-
 	/*
 	 * Migrate the task to the waking domain.
 	 * Do not violate hard affinity.
@@ -762,7 +760,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 	for_each_domain(this_cpu, sd) {
 		if (!(sd->flags & SD_WAKE_AFFINE))
 			break;
-		if (rq->timestamp_last_tick - p->timestamp < sd->cache_hot_time)
+		if (task_hot(p, rq->timestamp_last_tick, sd))
 			break;
 
 		if (cpu_isset(cpu, sd->span))
@@ -774,22 +772,18 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 	new_cpu = wake_idle(new_cpu, p);
 	if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) {
 		set_task_cpu(p, new_cpu);
-		goto repeat_lock_task;
-	}
-	goto out_activate;
-
-repeat_lock_task:
-	task_rq_unlock(rq, &flags);
-	rq = task_rq_lock(p, &flags);
-	old_state = p->state;
-	if (!(old_state & state))
-		goto out;
-
-	if (p->array)
-		goto out_running;
+		task_rq_unlock(rq, &flags);
+		/* might preempt at this point */
+		rq = task_rq_lock(p, &flags);
+		old_state = p->state;
+		if (!(old_state & state))
+			goto out;
+		if (p->array)
+			goto out_running;
 
 	this_cpu = smp_processor_id();
 	cpu = task_cpu(p);
+	}
 
 out_activate:
 #endif /* CONFIG_SMP */
@@ -1301,7 +1295,7 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 	/* Aggressive migration if we've failed balancing */
 	if (idle == NEWLY_IDLE ||
 			sd->nr_balance_failed < sd->cache_nice_tries) {
-		if (rq->timestamp_last_tick - p->timestamp < sd->cache_hot_time)
+		if (task_hot(p, rq->timestamp_last_tick, sd))
 			return 0;
 	}
 
@@ -1319,10 +1313,9 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 		      unsigned long max_nr_move, struct sched_domain *sd,
 		      enum idle_type idle)
 {
-	int idx;
-	int pulled = 0;
 	prio_array_t *array, *dst_array;
 	struct list_head *head, *curr;
+	int idx, pulled = 0;
 	task_t *tmp;
 
 	if (max_nr_move <= 0 || busiest->nr_running <= 1)
@@ -1411,10 +1404,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 		cpus_and(tmp, group->cpumask, cpu_online_map);
-		if (unlikely(cpus_empty(tmp))) {
-			WARN_ON(1);
-			return NULL;
-		}
+		if (unlikely(cpus_empty(tmp)))
+			goto nextgroup;
 
 		for_each_cpu_mask(i, tmp) {
 			/* Bias balancing toward cpus of our domain */