Commit f3479f10 authored by Ingo Molnar

sched: remove the sleep-bonus interactivity code

Remove the sleep-bonus interactivity code from the core scheduler.

Scheduling policy is implemented in the policy modules, and CFS does
not need this type of heuristic.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c18a1732
@@ -894,7 +894,7 @@ static inline int __normal_prio(struct task_struct *p)
 {
 	int bonus, prio;
 
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+	bonus = 0;
 
 	prio = p->static_prio - bonus;
 	if (prio < MAX_RT_PRIO)
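
With the bonus term forced to zero by the hunk above, the dynamic priority
computed by __normal_prio() reduces to the task's static (nice-derived)
priority, clamped into the non-realtime range. A minimal stand-alone sketch of
that post-patch behaviour follows; the clamp to MAX_PRIO-1 and the constant
values (100/140) are assumptions based on the surrounding code, not a verbatim
excerpt from the kernel source:

/* Sketch: __normal_prio() once the interactivity bonus is always 0. */
#define MAX_RT_PRIO	100			/* assumed value */
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* nice -20..19 -> 100..139 (assumed) */

static inline int normal_prio_sketch(int static_prio)
{
	int bonus = 0;			/* sleep-bonus heuristic removed */
	int prio = static_prio - bonus;

	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO - 1)
		prio = MAX_PRIO - 1;
	return prio;
}
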
@@ -970,42 +970,6 @@ static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
  */
 static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long sleep_time = now - p->timestamp;
-
-	if (batch_task(p))
-		sleep_time = 0;
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * This ceiling is set to the lowest priority that would allow
-		 * a task to be reinserted into the active array on timeslice
-		 * completion.
-		 */
-		unsigned long ceiling = INTERACTIVE_SLEEP(p);
-
-		if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
-			/*
-			 * Prevents user tasks from achieving best priority
-			 * with one single large enough sleep.
-			 */
-			p->sleep_avg = ceiling;
-		} else {
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-		}
-	}
-
-	if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-		p->sleep_avg = NS_MAX_SLEEP_AVG;
-
 	return effective_prio(p);
 }
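
The block removed above was the heart of the old interactivity estimator: time
spent sleeping was accumulated into p->sleep_avg (capped at NS_MAX_SLEEP_AVG,
with an INTERACTIVE_SLEEP() ceiling so a single long sleep could not max it
out), and the first hunk's CURRENT_BONUS() then turned that average into a
boost or penalty of up to MAX_BONUS/2 priority levels either side of the
static priority. The following rough, self-contained model illustrates that
mapping; the constants (MAX_BONUS of 10, a roughly one-second sleep-average
ceiling) and helper names are assumptions for illustration, not kernel code:

#include <stdio.h>

/* Assumed, approximate constants of the old O(1) interactivity heuristic. */
#define MAX_BONUS		10		/* bonus spans -5..+5 around static_prio */
#define NS_MAX_SLEEP_AVG	1000000000ULL	/* ~1s sleep-average ceiling (assumed) */

struct task_model {
	unsigned long long sleep_avg;	/* accumulated sleep time, in nanoseconds */
	int static_prio;		/* 120 corresponds to nice 0 */
};

/* Roughly what CURRENT_BONUS(p) did: map sleep_avg linearly onto 0..MAX_BONUS. */
static int current_bonus(const struct task_model *p)
{
	return (int)(p->sleep_avg * MAX_BONUS / NS_MAX_SLEEP_AVG);
}

/* Model of the removed recalc_task_prio() bookkeeping: sleep feeds sleep_avg. */
static void account_sleep(struct task_model *p, unsigned long long slept_ns)
{
	p->sleep_avg += slept_ns;
	if (p->sleep_avg > NS_MAX_SLEEP_AVG)
		p->sleep_avg = NS_MAX_SLEEP_AVG;
}

/* Model of the pre-patch __normal_prio(): sleepers boosted, CPU hogs penalised. */
static int old_normal_prio(const struct task_model *p)
{
	int bonus = current_bonus(p) - MAX_BONUS / 2;

	return p->static_prio - bonus;
}

int main(void)
{
	struct task_model t = { .sleep_avg = 0, .static_prio = 120 };

	printf("no sleep credit: prio %d\n", old_normal_prio(&t));	/* 125 */
	account_sleep(&t, 800000000ULL);	/* task sleeps for 0.8s */
	printf("after sleeping:  prio %d\n", old_normal_prio(&t));	/* 117 */
	return 0;
}

With this commit that bookkeeping is gone from the core scheduler; as the
commit message notes, heuristics of this kind are left to the scheduling
policy modules.
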
@@ -3560,9 +3524,6 @@ asmlinkage void __sched schedule(void)
 	clear_tsk_need_resched(prev);
 	rcu_qsctr_inc(task_cpu(prev));
 
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
 	prev->timestamp = prev->last_ran = now;
 
 	sched_info_switch(prev, next);
@@ -4204,11 +4165,6 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
-	/*
-	 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
-	 */
-	if (policy == SCHED_BATCH)
-		p->sleep_avg = 0;
 	set_load_weight(p);
 }
 
@@ -4931,7 +4887,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	unsigned long flags;
 
 	idle->timestamp = sched_clock();
-	idle->sleep_avg = 0;
 	idle->array = NULL;
 	idle->prio = idle->normal_prio = MAX_PRIO;
 	idle->state = TASK_RUNNING;