Commit d0c451f8 authored by Linus Torvalds

Merge

parents 0da6b746 76e3413d
...@@ -328,7 +328,7 @@ struct task_struct { ...@@ -328,7 +328,7 @@ struct task_struct {
prio_array_t *array; prio_array_t *array;
unsigned long sleep_avg; unsigned long sleep_avg;
unsigned long sleep_timestamp; unsigned long last_run;
unsigned long policy; unsigned long policy;
unsigned long cpus_allowed; unsigned long cpus_allowed;
......
...@@ -916,7 +916,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -916,7 +916,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
*/ */
p->first_time_slice = 1; p->first_time_slice = 1;
current->time_slice >>= 1; current->time_slice >>= 1;
p->sleep_timestamp = jiffies; p->last_run = jiffies;
if (!current->time_slice) { if (!current->time_slice) {
/* /*
* This case is rare, it happens when the parent has only * This case is rare, it happens when the parent has only
......
...@@ -54,20 +54,19 @@ ...@@ -54,20 +54,19 @@
/* /*
* These are the 'tuning knobs' of the scheduler: * These are the 'tuning knobs' of the scheduler:
* *
* Minimum timeslice is 10 msecs, default timeslice is 150 msecs, * Minimum timeslice is 10 msecs, default timeslice is 100 msecs,
* maximum timeslice is 300 msecs. Timeslices get refilled after * maximum timeslice is 200 msecs. Timeslices get refilled after
* they expire. * they expire.
*/ */
#define MIN_TIMESLICE ( 10 * HZ / 1000) #define MIN_TIMESLICE ( 10 * HZ / 1000)
#define MAX_TIMESLICE (300 * HZ / 1000) #define MAX_TIMESLICE (200 * HZ / 1000)
#define CHILD_PENALTY 95 #define CHILD_PENALTY 50
#define PARENT_PENALTY 100 #define PARENT_PENALTY 100
#define EXIT_WEIGHT 3 #define EXIT_WEIGHT 3
#define PRIO_BONUS_RATIO 25 #define PRIO_BONUS_RATIO 25
#define INTERACTIVE_DELTA 2 #define INTERACTIVE_DELTA 2
#define MAX_SLEEP_AVG (2*HZ) #define MAX_SLEEP_AVG (10*HZ)
#define STARVATION_LIMIT (2*HZ) #define STARVATION_LIMIT (10*HZ)
#define NODE_THRESHOLD 125
/* /*
* If a task is 'interactive' then we reinsert it in the active * If a task is 'interactive' then we reinsert it in the active
...@@ -323,16 +322,21 @@ static inline int effective_prio(task_t *p) ...@@ -323,16 +322,21 @@ static inline int effective_prio(task_t *p)
* Also update all the scheduling statistics stuff. (sleep average * Also update all the scheduling statistics stuff. (sleep average
* calculation, priority modifiers, etc.) * calculation, priority modifiers, etc.)
*/ */
/*
 * __activate_task - low-level activation: place the task on the runqueue's
 * active priority array and bump the run count.  Unlike activate_task(),
 * this does no sleep_avg/priority recalculation.
 *
 * NOTE(review): presumably called with the runqueue lock held (callers such
 * as try_to_wake_up/setscheduler appear to hold it) — confirm against callers.
 */
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
enqueue_task(p, rq->active);
rq->nr_running++;
}
static inline void activate_task(task_t *p, runqueue_t *rq) static inline void activate_task(task_t *p, runqueue_t *rq)
{ {
unsigned long sleep_time = jiffies - p->sleep_timestamp; unsigned long sleep_time = jiffies - p->last_run;
prio_array_t *array = rq->active;
if (!rt_task(p) && sleep_time) { if (!rt_task(p) && sleep_time) {
/* /*
* This code gives a bonus to interactive tasks. We update * This code gives a bonus to interactive tasks. We update
* an 'average sleep time' value here, based on * an 'average sleep time' value here, based on
* sleep_timestamp. The more time a task spends sleeping, * ->last_run. The more time a task spends sleeping,
* the higher the average gets - and the higher the priority * the higher the average gets - and the higher the priority
* boost gets as well. * boost gets as well.
*/ */
...@@ -345,11 +349,9 @@ static inline void activate_task(task_t *p, runqueue_t *rq) ...@@ -345,11 +349,9 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
if (!in_interrupt()) if (!in_interrupt())
current->sleep_avg = ticks; current->sleep_avg = ticks;
} }
p->prio = effective_prio(p); p->prio = effective_prio(p);
} }
enqueue_task(p, array); __activate_task(p, rq);
nr_running_inc(rq);
} }
/* /*
...@@ -486,10 +488,13 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync) ...@@ -486,10 +488,13 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
} }
if (old_state == TASK_UNINTERRUPTIBLE) if (old_state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--; rq->nr_uninterruptible--;
activate_task(p, rq); if (sync)
__activate_task(p, rq);
if (p->prio < rq->curr->prio) else {
resched_task(rq->curr); activate_task(p, rq);
if (p->prio < rq->curr->prio)
resched_task(rq->curr);
}
success = 1; success = 1;
} }
p->state = TASK_RUNNING; p->state = TASK_RUNNING;
...@@ -532,8 +537,16 @@ void wake_up_forked_process(task_t * p) ...@@ -532,8 +537,16 @@ void wake_up_forked_process(task_t * p)
p->prio = effective_prio(p); p->prio = effective_prio(p);
} }
set_task_cpu(p, smp_processor_id()); set_task_cpu(p, smp_processor_id());
activate_task(p, rq);
if (unlikely(!current->array))
__activate_task(p, rq);
else {
p->prio = current->prio;
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
rq->nr_running++;
}
task_rq_unlock(rq, &flags); task_rq_unlock(rq, &flags);
} }
...@@ -960,6 +973,11 @@ static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t ...@@ -960,6 +973,11 @@ static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t
*/ */
if (p->prio < this_rq->curr->prio) if (p->prio < this_rq->curr->prio)
set_need_resched(); set_need_resched();
else {
if (p->prio == this_rq->curr->prio &&
p->time_slice > this_rq->curr->time_slice)
set_need_resched();
}
} }
/* /*
...@@ -1023,7 +1041,7 @@ static void load_balance(runqueue_t *this_rq, int idle) ...@@ -1023,7 +1041,7 @@ static void load_balance(runqueue_t *this_rq, int idle)
*/ */
#define CAN_MIGRATE_TASK(p,rq,this_cpu) \ #define CAN_MIGRATE_TASK(p,rq,this_cpu) \
((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \ ((jiffies - (p)->last_run > cache_decay_ticks) && \
!task_running(rq, p) && \ !task_running(rq, p) && \
((p)->cpus_allowed & (1UL << (this_cpu)))) ((p)->cpus_allowed & (1UL << (this_cpu))))
...@@ -1083,9 +1101,9 @@ DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } }; ...@@ -1083,9 +1101,9 @@ DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
* increasing number of running tasks: * increasing number of running tasks:
*/ */
#define EXPIRED_STARVING(rq) \ #define EXPIRED_STARVING(rq) \
((rq)->expired_timestamp && \ (STARVATION_LIMIT && ((rq)->expired_timestamp && \
(jiffies - (rq)->expired_timestamp >= \ (jiffies - (rq)->expired_timestamp >= \
STARVATION_LIMIT * ((rq)->nr_running) + 1)) STARVATION_LIMIT * ((rq)->nr_running) + 1)))
/* /*
* This function gets called by the timer code, with HZ frequency. * This function gets called by the timer code, with HZ frequency.
...@@ -1208,7 +1226,7 @@ asmlinkage void schedule(void) ...@@ -1208,7 +1226,7 @@ asmlinkage void schedule(void)
rq = this_rq(); rq = this_rq();
release_kernel_lock(prev); release_kernel_lock(prev);
prev->sleep_timestamp = jiffies; prev->last_run = jiffies;
spin_lock_irq(&rq->lock); spin_lock_irq(&rq->lock);
/* /*
...@@ -1708,7 +1726,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param *param) ...@@ -1708,7 +1726,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param *param)
else else
p->prio = p->static_prio; p->prio = p->static_prio;
if (array) if (array)
activate_task(p, task_rq(p)); __activate_task(p, task_rq(p));
out_unlock: out_unlock:
task_rq_unlock(rq, &flags); task_rq_unlock(rq, &flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment