Commit 53f7b9bc authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: fix ideal_runtime calculations for reniced tasks
  sched: improve prev_sum_exec_runtime setting
  sched: simplify __check_preempt_curr_fair()
  sched: fix xtensa build warning
  sched: debug: fix sum_exec_runtime clearing
  sched: debug: fix cfs_rq->wait_runtime accounting
  sched: fix niced_granularity() shift
  sched: fix MC/HT scheduler optimization, without breaking the FUZZ logic.
parents 3c038f97 11697830
@@ -668,7 +668,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 /*
  * Shift right and round:
  */
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
 
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
@@ -684,10 +684,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
          * Check whether we'd overflow the 64-bit multiplication:
          */
         if (unlikely(tmp > WMULT_CONST))
-                tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+                tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
                         WMULT_SHIFT/2);
         else
-                tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+                tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
 
         return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
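
The RSR() -> SRR() rename above is the "fix xtensa build warning" change from the list: the shift-right-and-round arithmetic is unchanged, only the name moves (presumably because xtensa already defines RSR). A standalone sketch of what the macro computes, using example values that are not taken from the kernel:

#include <stdio.h>

#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
        /* Plain >> truncates: 7 >> 2 == 1.  SRR rounds to nearest: 2. */
        printf("7 >> 2    = %lu\n", 7UL >> 2);
        printf("SRR(7, 2) = %lu\n", SRR(7UL, 2));
        /* Halfway values round up: 6/4 == 1.5, so SRR(6, 2) == 2. */
        printf("SRR(6, 2) = %lu\n", SRR(6UL, 2));
        return 0;
}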
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-        task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
         p->se.wait_runtime = 0;
 
         if (task_has_rt_policy(p)) {
@@ -2512,7 +2511,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
          * a think about bumping its value to force at least one task to be
          * moved
          */
-        if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+        if (*imbalance < busiest_load_per_task) {
                 unsigned long tmp, pwr_now, pwr_move;
                 unsigned int imbn;
 
@@ -2564,10 +2563,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                 pwr_move /= SCHED_LOAD_SCALE;
 
                 /* Move if we gain throughput */
-                if (pwr_move <= pwr_now)
-                        goto out_balanced;
-
-                *imbalance = busiest_load_per_task;
+                if (pwr_move > pwr_now)
+                        *imbalance = busiest_load_per_task;
         }
 
         return busiest;
...
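
Taken together, the two find_busiest_group() hunks above drop the SCHED_LOAD_SCALE_FUZZ term from the small-imbalance test and turn the old goto out_balanced into a fall-through: when moving a task would not gain throughput, the computed small imbalance is simply left in place instead of abandoning the balance attempt. A toy, self-contained sketch of the resulting decision; the function name and the numbers below are illustrative only, not the kernel's:

#include <stdio.h>

static unsigned long fixup_small_imbalance(unsigned long imbalance,
                                           unsigned long busiest_load_per_task,
                                           unsigned long pwr_move,
                                           unsigned long pwr_now)
{
        if (imbalance < busiest_load_per_task) {
                /* Move only if we gain throughput, as in the hunk above. */
                if (pwr_move > pwr_now)
                        imbalance = busiest_load_per_task;
        }
        return imbalance;
}

int main(void)
{
        /* Moving gains throughput: bump the imbalance to one task's load. */
        printf("%lu\n", fixup_small_imbalance(100, 1024, 2048, 1024));
        /* No gain: keep the small imbalance instead of giving up entirely. */
        printf("%lu\n", fixup_small_imbalance(100, 1024, 1024, 1024));
        return 0;
}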
@@ -283,4 +283,5 @@ void proc_sched_set_task(struct task_struct *p)
         p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
 #endif
         p->se.sum_exec_runtime = 0;
+        p->se.prev_sum_exec_runtime = 0;
 }
@@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         update_load_add(&cfs_rq->load, se->load.weight);
         cfs_rq->nr_running++;
         se->on_rq = 1;
+
+        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static inline void
@@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         update_load_sub(&cfs_rq->load, se->load.weight);
         cfs_rq->nr_running--;
         se->on_rq = 0;
+
+        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
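
With the two hunks above, the cfs_rq-wide wait_runtime statistic is adjusted in exactly one place per direction: added when an entity enters the rbtree and subtracted when it leaves. That is what lets the other hunks in this merge drop the scattered adjustments in set_load_weight(), __enqueue_sleeper(), dequeue_entity() and task_new_fair(). A toy model of that bookkeeping, with simplified stand-in types rather than kernel code:

#include <stdio.h>

struct toy_entity { long wait_runtime; int on_rq; };
struct toy_cfs_rq { long wait_runtime; unsigned long nr_running; };

static void toy_enqueue(struct toy_cfs_rq *cfs_rq, struct toy_entity *se)
{
        cfs_rq->nr_running++;
        se->on_rq = 1;
        cfs_rq->wait_runtime += se->wait_runtime;       /* schedstat_add(...) */
}

static void toy_dequeue(struct toy_cfs_rq *cfs_rq, struct toy_entity *se)
{
        cfs_rq->nr_running--;
        se->on_rq = 0;
        cfs_rq->wait_runtime -= se->wait_runtime;       /* schedstat_add(..., -...) */
}

int main(void)
{
        struct toy_cfs_rq rq = { 0, 0 };
        struct toy_entity a = { 500, 0 }, b = { -200, 0 };

        toy_enqueue(&rq, &a);
        toy_enqueue(&rq, &b);
        toy_dequeue(&rq, &a);
        /* Only b is still queued, so the aggregate equals b's wait_runtime. */
        printf("cfs_rq wait_runtime = %ld\n", rq.wait_runtime);
        return 0;
}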
@@ -291,7 +295,7 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
         /*
          * It will always fit into 'long':
          */
-        return (long) (tmp >> WMULT_SHIFT);
+        return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
 }
 
 static inline void
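
The shift fix above matters for negative nice levels, where niced_granularity() builds tmp from the entity's inv_weight (an assumption here, since that line is outside the hunk): shifting by the full WMULT_SHIFT yields roughly granularity/weight, about NICE_0_LOAD times smaller than the intended granularity scaled relative to nice-0. A quick standalone check, assuming the usual constants for this series (WMULT_SHIFT = 32, NICE_0_SHIFT = 10) and an example task at twice the nice-0 weight:

#include <stdio.h>

#define WMULT_SHIFT     32
#define NICE_0_SHIFT    10
#define NICE_0_LOAD     (1UL << NICE_0_SHIFT)

int main(void)
{
        unsigned long granularity = 2000000;    /* 2 ms in ns, example only */
        /* Example inv_weight for a task with weight == 2 * NICE_0_LOAD: */
        unsigned long long inv_weight = (1ULL << WMULT_SHIFT) / (2 * NICE_0_LOAD);
        unsigned long long tmp = inv_weight * granularity;

        /* Old shift: ~976 ns, about NICE_0_LOAD times too small. */
        printf("old shift: %llu ns\n", tmp >> WMULT_SHIFT);
        /* New shift: 1000000 ns, i.e. half of the 2 ms nice-0 granularity. */
        printf("new shift: %llu ns\n", tmp >> (WMULT_SHIFT - NICE_0_SHIFT));
        return 0;
}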
@@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
         prev_runtime = se->wait_runtime;
         __add_wait_runtime(cfs_rq, se, delta_fair);
-        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
         delta_fair = se->wait_runtime - prev_runtime;
 
         /*
@@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
                         if (tsk->state & TASK_UNINTERRUPTIBLE)
                                 se->block_start = rq_of(cfs_rq)->clock;
                 }
-                cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
         }
         __dequeue_entity(cfs_rq, se);
@@ -671,22 +673,39 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static int
+static void
 __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                           struct sched_entity *curr, unsigned long granularity)
 {
         s64 __delta = curr->fair_key - se->fair_key;
+        unsigned long ideal_runtime, delta_exec;
+
+        /*
+         * ideal_runtime is compared against sum_exec_runtime, which is
+         * walltime, hence do not scale.
+         */
+        ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+                        (unsigned long)sysctl_sched_min_granularity);
+
+        /*
+         * If we executed more than what the latency constraint suggests,
+         * reduce the rescheduling granularity. This way the total latency
+         * of how much a task is not scheduled converges to
+         * sysctl_sched_latency:
+         */
+        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+        if (delta_exec > ideal_runtime)
+                granularity = 0;
 
         /*
          * Take scheduling granularity into account - do not
          * preempt the current task unless the best task has
          * a larger than sched_granularity fairness advantage:
+         *
+         * scale granularity as key space is in fair_clock.
          */
-        if (__delta > niced_granularity(curr, granularity)) {
+        if (__delta > niced_granularity(curr, granularity))
                 resched_task(rq_of(cfs_rq)->curr);
-                return 1;
-        }
-
-        return 0;
 }
 
 static inline void
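
The rewritten __check_preempt_curr_fair() above folds the latency check into the preemption test itself: ideal_runtime is plain walltime (hence correct for reniced tasks, which is the first fix in the list), and once the current task has run past it the nice-scaled granularity hysteresis is dropped so the wakee preempts immediately. A simplified, self-contained sketch of that decision; the sysctl values and stand-in parameters below are illustrative, not the kernel's:

#include <stdio.h>

#define MAX(a, b)       ((a) > (b) ? (a) : (b))

static unsigned long sysctl_sched_latency = 20000000;           /* 20 ms, example */
static unsigned long sysctl_sched_min_granularity = 2000000;    /* 2 ms, example */

static int should_resched(unsigned long long sum_exec,
                          unsigned long long prev_sum_exec,
                          unsigned long nr_running,
                          long delta_key, long scaled_granularity)
{
        unsigned long ideal_runtime, delta_exec;

        /* ideal_runtime is walltime, hence not scaled by nice level: */
        ideal_runtime = MAX(sysctl_sched_latency / nr_running,
                            sysctl_sched_min_granularity);

        /* Ran past the latency constraint: drop the granularity hysteresis. */
        delta_exec = (unsigned long)(sum_exec - prev_sum_exec);
        if (delta_exec > ideal_runtime)
                scaled_granularity = 0;

        /* Preempt only if the wakee's key advantage beats the granularity. */
        return delta_key > scaled_granularity;
}

int main(void)
{
        /* Ran 6 ms of a 5 ms ideal slice (20 ms / 4 runnable): preempt. */
        printf("%d\n", should_resched(6000000, 0, 4, 1, 3000000));
        /* Ran 3 ms, key advantage below the 3 ms granularity: keep running. */
        printf("%d\n", should_resched(3000000, 0, 4, 1, 3000000));
        return 0;
}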
@@ -702,6 +721,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         update_stats_wait_end(cfs_rq, se);
         update_stats_curr_start(cfs_rq, se);
         set_cfs_rq_curr(cfs_rq, se);
+        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-        unsigned long gran, ideal_runtime, delta_exec;
         struct sched_entity *next;
 
         /*
@@ -748,22 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
         if (next == curr)
                 return;
 
-        gran = sched_granularity(cfs_rq);
-        ideal_runtime = niced_granularity(curr,
-                max(sysctl_sched_latency / cfs_rq->nr_running,
-                    (unsigned long)sysctl_sched_min_granularity));
-
-        /*
-         * If we executed more than what the latency constraint suggests,
-         * reduce the rescheduling granularity. This way the total latency
-         * of how much a task is not scheduled converges to
-         * sysctl_sched_latency:
-         */
-        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-        if (delta_exec > ideal_runtime)
-                gran = 0;
-
-        if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
-                curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
+        __check_preempt_curr_fair(cfs_rq, next, curr,
+                        sched_granularity(cfs_rq));
 }
 
 /**************************************************
@@ -1121,10 +1126,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
          * The statistical average of wait_runtime is about
          * -granularity/2, so initialize the task with that:
          */
-        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
                 se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
-                schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
-        }
 
         __enqueue_entity(cfs_rq, se);
 }
...