Commit 74c72d80 authored by Ingo Molnar's avatar Ingo Molnar

[PATCH] more "interactivity changes", sched-B2

 - fix a (now-) bug in kernel/softirq.c, it did a wakeup outside any
   atomic regions, which falsely identified random processes as a
   non-atomic wakeup, and which causes random priority boost to be
   distributed.

 - reset the initial idle thread's priority back to MAX_PRIO after doing
   the wake_up_forked_process() - correct preemption relies on this.

 - update current->prio immediately after a backboost.

 - clean up effective_prio() & sleep_avg calculations so that there are
   fewer RT-task special cases.  This has the advantage of the sleep_avg
   being maintained even for RT tasks - this could be advantageous for
   tasks that briefly enter/exit RT mode.
parent 5667cc2b
...@@ -302,10 +302,13 @@ static inline void enqueue_task(struct task_struct *p, prio_array_t *array) ...@@ -302,10 +302,13 @@ static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
* *
* Both properties are important to certain workloads. * Both properties are important to certain workloads.
*/ */
static inline int effective_prio(task_t *p) static int effective_prio(task_t *p)
{ {
int bonus, prio; int bonus, prio;
if (rt_task(p))
return p->prio;
bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 - bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 -
MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2; MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;
...@@ -333,23 +336,41 @@ static inline void activate_task(task_t *p, runqueue_t *rq) ...@@ -333,23 +336,41 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
{ {
unsigned long sleep_time = jiffies - p->last_run; unsigned long sleep_time = jiffies - p->last_run;
if (!rt_task(p) && sleep_time) { if (sleep_time) {
int sleep_avg;
/*
* This code gives a bonus to interactive tasks.
*
* The boost works by updating the 'average sleep time'
* value here, based on ->last_run. The more time a task
* spends sleeping, the higher the average gets - and the
* higher the priority boost gets as well.
*/
sleep_avg = p->sleep_avg + sleep_time;
/* /*
* This code gives a bonus to interactive tasks. We update * 'Overflow' bonus ticks go to the waker as well, so the
* an 'average sleep time' value here, based on * ticks are not lost. This has the effect of further
* ->last_run. The more time a task spends sleeping, * boosting tasks that are related to maximum-interactive
* the higher the average gets - and the higher the priority * tasks.
* boost gets as well.
*/ */
p->sleep_avg += sleep_time; if (sleep_avg > MAX_SLEEP_AVG) {
if (p->sleep_avg > MAX_SLEEP_AVG) { if (!in_interrupt()) {
int ticks = p->sleep_avg - MAX_SLEEP_AVG + current->sleep_avg; prio_array_t *array = current->array;
p->sleep_avg = MAX_SLEEP_AVG; BUG_ON(!array);
if (ticks > MAX_SLEEP_AVG) sleep_avg += current->sleep_avg - MAX_SLEEP_AVG;
ticks = MAX_SLEEP_AVG; if (sleep_avg > MAX_SLEEP_AVG)
if (!in_interrupt()) sleep_avg = MAX_SLEEP_AVG;
current->sleep_avg = ticks;
current->sleep_avg = sleep_avg;
dequeue_task(current, array);
current->prio = effective_prio(current);
enqueue_task(current, array);
}
sleep_avg = MAX_SLEEP_AVG;
} }
p->sleep_avg = sleep_avg;
p->prio = effective_prio(p); p->prio = effective_prio(p);
} }
__activate_task(p, rq); __activate_task(p, rq);
...@@ -527,16 +548,14 @@ void wake_up_forked_process(task_t * p) ...@@ -527,16 +548,14 @@ void wake_up_forked_process(task_t * p)
runqueue_t *rq = task_rq_lock(current, &flags); runqueue_t *rq = task_rq_lock(current, &flags);
p->state = TASK_RUNNING; p->state = TASK_RUNNING;
if (!rt_task(p)) { /*
/* * We decrease the sleep average of forking parents
* We decrease the sleep average of forking parents * and children as well, to keep max-interactive tasks
* and children as well, to keep max-interactive tasks * from forking tasks that are max-interactive.
* from forking tasks that are max-interactive. */
*/ current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100;
current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100; p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100; p->prio = effective_prio(p);
p->prio = effective_prio(p);
}
set_task_cpu(p, smp_processor_id()); set_task_cpu(p, smp_processor_id());
if (unlikely(!current->array)) if (unlikely(!current->array))
...@@ -1147,6 +1166,16 @@ void scheduler_tick(int user_ticks, int sys_ticks) ...@@ -1147,6 +1166,16 @@ void scheduler_tick(int user_ticks, int sys_ticks)
return; return;
} }
spin_lock(&rq->lock); spin_lock(&rq->lock);
/*
* The task was running during this tick - update the
* time slice counter and the sleep average. Note: we
* do not update a thread's priority until it either
* goes to sleep or uses up its timeslice. This makes
* it possible for interactive tasks to use up their
* timeslices at their highest priority levels.
*/
if (p->sleep_avg)
p->sleep_avg--;
if (unlikely(rt_task(p))) { if (unlikely(rt_task(p))) {
/* /*
* RR tasks need a special form of timeslice management. * RR tasks need a special form of timeslice management.
...@@ -1163,16 +1192,6 @@ void scheduler_tick(int user_ticks, int sys_ticks) ...@@ -1163,16 +1192,6 @@ void scheduler_tick(int user_ticks, int sys_ticks)
} }
goto out; goto out;
} }
/*
* The task was running during this tick - update the
* time slice counter and the sleep average. Note: we
* do not update a thread's priority until it either
* goes to sleep or uses up its timeslice. This makes
* it possible for interactive tasks to use up their
* timeslices at their highest priority levels.
*/
if (p->sleep_avg)
p->sleep_avg--;
if (!--p->time_slice) { if (!--p->time_slice) {
dequeue_task(p, rq->active); dequeue_task(p, rq->active);
set_tsk_need_resched(p); set_tsk_need_resched(p);
...@@ -2468,6 +2487,7 @@ void __init sched_init(void) ...@@ -2468,6 +2487,7 @@ void __init sched_init(void)
rq->idle = current; rq->idle = current;
set_task_cpu(current, smp_processor_id()); set_task_cpu(current, smp_processor_id());
wake_up_forked_process(current); wake_up_forked_process(current);
current->prio = MAX_PRIO;
init_timers(); init_timers();
......
...@@ -92,10 +92,9 @@ asmlinkage void do_softirq() ...@@ -92,10 +92,9 @@ asmlinkage void do_softirq()
mask &= ~pending; mask &= ~pending;
goto restart; goto restart;
} }
__local_bh_enable();
if (pending) if (pending)
wakeup_softirqd(cpu); wakeup_softirqd(cpu);
__local_bh_enable();
} }
local_irq_restore(flags); local_irq_restore(flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment