Commit f2ac58ee authored by Ingo Molnar

sched: remove sleep_type

Remove the sleep_type heuristics from the core scheduler: scheduling
policy is implemented in the scheduling-policy modules, and CFS does
not use this kind of sleep-type heuristic.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 45bf76df
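
For context on what is being deleted: the old O(1) scheduler tagged every wakeup with a sleep_type and used it to decide how much interactivity credit (sleep_avg) a task earned, which in turn fed the dynamic-priority bonus. Below is a minimal, self-contained sketch of that idea. The enum values mirror the code removed by this commit, but the task struct, the millisecond constants, and the simplified credit_sleep() logic are illustrative stand-ins, not the kernel's actual implementation.

#include <stdio.h>

/* Mirrors the enum this commit removes from the scheduler headers. */
enum sleep_type {
        SLEEP_NORMAL,
        SLEEP_NONINTERACTIVE,
        SLEEP_INTERACTIVE,
        SLEEP_INTERRUPTED,
};

/* Illustrative stand-in for the task fields the heuristic touched. */
struct task {
        unsigned long sleep_avg;        /* interactivity credit, ms here */
        enum sleep_type sleep_type;
};

#define MAX_SLEEP_AVG           1000UL  /* illustrative cap, not the kernel's */
#define INTERACTIVE_CEILING     700UL   /* illustrative lower cap for I/O-bound sleeps */

/*
 * Credit a wakeup after slept_ms of sleep. Interactive and
 * interrupt-driven sleeps earn full credit; sleeps marked
 * non-interactive (likely I/O waits) have their rise capped so the
 * task cannot masquerade as interactive. A simplification of the
 * recalc_task_prio() logic deleted below.
 */
static void credit_sleep(struct task *p, unsigned long slept_ms)
{
        unsigned long ceiling;

        switch (p->sleep_type) {
        case SLEEP_INTERACTIVE:
        case SLEEP_INTERRUPTED:
                ceiling = MAX_SLEEP_AVG;
                break;
        case SLEEP_NONINTERACTIVE:
                ceiling = INTERACTIVE_CEILING;
                break;
        default:        /* SLEEP_NORMAL: first-time wakeup, weighted down */
                ceiling = MAX_SLEEP_AVG;
                slept_ms /= 2;
                break;
        }

        p->sleep_avg += slept_ms;
        if (p->sleep_avg > ceiling)
                p->sleep_avg = ceiling;
}

int main(void)
{
        struct task a = { 0, SLEEP_INTERACTIVE };
        struct task b = { 0, SLEEP_NONINTERACTIVE };

        credit_sleep(&a, 800);  /* both slept 800ms ... */
        credit_sleep(&b, 800);  /* ... but b's credit is capped lower */
        printf("interactive: %lu  noninteractive: %lu\n",
               a.sleep_avg, b.sleep_avg);
        return 0;
}

The point of the commit is that this kind of classification no longer belongs in the core scheduler: under the modular design such policy lives in the per-class scheduling modules, and CFS in particular derives fairness from accumulated runtime rather than from sleep-type guesses.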
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -788,13 +788,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-enum sleep_type {
-        SLEEP_NORMAL,
-        SLEEP_NONINTERACTIVE,
-        SLEEP_INTERACTIVE,
-        SLEEP_INTERRUPTED,
-};
-
 struct prio_array;
 struct rq;
 struct sched_domain;
@@ -905,7 +898,6 @@ struct task_struct {
         unsigned long sleep_avg;
         unsigned long long timestamp, last_ran;
         unsigned long long sched_time; /* sched_clock time spent running */
-        enum sleep_type sleep_type;
 
         unsigned int policy;
         cpumask_t cpus_allowed;
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -990,32 +990,7 @@ static int recalc_task_prio(struct task_struct *p, unsigned long long now)
                         * with one single large enough sleep.
                         */
                        p->sleep_avg = ceiling;
-                       /*
-                        * Using INTERACTIVE_SLEEP() as a ceiling places a
-                        * nice(0) task 1ms sleep away from promotion, and
-                        * gives it 700ms to round-robin with no chance of
-                        * being demoted.  This is more than generous, so
-                        * mark this sleep as non-interactive to prevent the
-                        * on-runqueue bonus logic from intervening should
-                        * this task not receive cpu immediately.
-                        */
-                       p->sleep_type = SLEEP_NONINTERACTIVE;
                } else {
-                       /*
-                        * Tasks waking from uninterruptible sleep are
-                        * limited in their sleep_avg rise as they
-                        * are likely to be waiting on I/O
-                        */
-                       if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
-                               if (p->sleep_avg >= ceiling)
-                                       sleep_time = 0;
-                               else if (p->sleep_avg + sleep_time >=
-                                        ceiling) {
-                                               p->sleep_avg = ceiling;
-                                               sleep_time = 0;
-                               }
-                       }
-
-                       /*
                         * This code gives a bonus to interactive tasks.
                         *
@@ -1069,29 +1044,6 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
        }
 
        p->prio = recalc_task_prio(p, now);
-
-       /*
-        * This checks to make sure it's not an uninterruptible task
-        * that is now waking up.
-        */
-       if (p->sleep_type == SLEEP_NORMAL) {
-               /*
-                * Tasks which were woken up by interrupts (ie. hw events)
-                * are most likely of interactive nature. So we give them
-                * the credit of extending their sleep time to the period
-                * of time they spend on the runqueue, waiting for execution
-                * on a CPU, first time around:
-                */
-               if (in_interrupt())
-                       p->sleep_type = SLEEP_INTERRUPTED;
-               else {
-                       /*
-                        * Normal first-time wakeups get a credit too for
-                        * on-runqueue time, but it will be weighted down:
-                        */
-                       p->sleep_type = SLEEP_INTERACTIVE;
-               }
-       }
        p->timestamp = now;
 out:
        __activate_task(p, rq);
@@ -1641,23 +1593,8 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 out_activate:
 #endif /* CONFIG_SMP */
-       if (old_state == TASK_UNINTERRUPTIBLE) {
+       if (old_state == TASK_UNINTERRUPTIBLE)
                rq->nr_uninterruptible--;
-               /*
-                * Tasks on involuntary sleep don't earn
-                * sleep_avg beyond just interactive state.
-                */
-               p->sleep_type = SLEEP_NONINTERACTIVE;
-       } else
-
-       /*
-        * Tasks that have marked their sleep as noninteractive get
-        * woken up with their sleep average not weighted in an
-        * interactive way.
-        */
-               if (old_state & TASK_NONINTERACTIVE)
-                       p->sleep_type = SLEEP_NONINTERACTIVE;
-
 
        activate_task(p, rq, cpu == this_cpu);
        /*
@@ -3533,12 +3470,6 @@ EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
-static inline int interactive_sleep(enum sleep_type sleep_type)
-{
-       return (sleep_type == SLEEP_INTERACTIVE ||
-               sleep_type == SLEEP_INTERRUPTED);
-}
-
 /*
  * schedule() is the main scheduler function.
  */
@@ -3549,7 +3480,7 @@ asmlinkage void __sched schedule(void)
        struct list_head *queue;
        unsigned long long now;
        unsigned long run_time;
-       int cpu, idx, new_prio;
+       int cpu, idx;
        long *switch_count;
        struct rq *rq;
 
@@ -3642,24 +3573,6 @@ asmlinkage void __sched schedule(void)
        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);
 
-       if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
-               unsigned long long delta = now - next->timestamp;
-               if (unlikely((long long)(now - next->timestamp) < 0))
-                       delta = 0;
-
-               if (next->sleep_type == SLEEP_INTERACTIVE)
-                       delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-               array = next->array;
-               new_prio = recalc_task_prio(next, next->timestamp + delta);
-
-               if (unlikely(next->prio != new_prio)) {
-                       dequeue_task(next, array);
-                       next->prio = new_prio;
-                       enqueue_task(next, array);
-               }
-       }
-       next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
        if (next == rq->idle)
                schedstat_inc(rq, sched_goidle);