Commit 15afe09b authored by Peter Zijlstra, committed by Ingo Molnar

sched: wakeup preempt when small overlap

Lin Ming reported a 10% OLTP regression against 2.6.27-rc4.

The difference seems to come from different preemption aggressiveness,
which affects the cache footprint of the workload and its effective
cache thrashing.

Aggressively preempt a task if its avg overlap is very small: this should
avoid the task going to sleep and then being found still running when we
schedule back to it, saving a wakeup.

Reported-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 09b22a2f
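Before the diff, it may help to see the new decision in isolation. Below is a minimal userspace sketch, not the kernel code itself: wakeup_overlap_preempt(), the task struct, the task names and the nanosecond figures are all invented for the example, and the threshold is hard-coded to the then-default value of sysctl_sched_migration_cost (0.5 ms).

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's types and tunables. */
struct task {
	const char *comm;
	unsigned long long avg_overlap;	/* ns this task tends to stay
					   runnable alongside its partner */
};

/* Then-default of /proc/sys/kernel/sched_migration_cost: 0.5 ms. */
static const unsigned long long sched_migration_cost = 500000ULL;

/*
 * Mirrors the WAKEUP_OVERLAP test this patch adds to
 * check_preempt_wakeup(): preempt immediately only on a sync wakeup
 * where both waker and wakee have a small average overlap.
 */
static int wakeup_overlap_preempt(int sync, const struct task *waker,
				  const struct task *wakee)
{
	return sync &&
	       waker->avg_overlap < sched_migration_cost &&
	       wakee->avg_overlap < sched_migration_cost;
}

int main(void)
{
	struct task server = { "oltp-server",  100000 };	/* 0.10 ms */
	struct task client = { "oltp-client",   80000 };	/* 0.08 ms */
	struct task batch  = { "batch",        2000000 };	/* 2.00 ms */

	/* Both overlaps are tiny: preempt, saving a sleep/wakeup cycle. */
	printf("client wakes server: preempt=%d\n",
	       wakeup_overlap_preempt(1, &client, &server));

	/* Long-running waker: fall through to the usual wakeup test. */
	printf("batch wakes server:  preempt=%d\n",
	       wakeup_overlap_preempt(1, &batch, &server));

	return 0;
}

With the 0.5 ms threshold, the short-overlap OLTP pair preempts immediately (preempt=1), while the long-running batch waker falls through to the normal wakeup-granularity test (preempt=0).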
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -897,7 +897,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int  (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -604,9 +604,9 @@ struct rq {
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
+static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
 {
-	rq->curr->sched_class->check_preempt_curr(rq, p);
+	rq->curr->sched_class->check_preempt_curr(rq, p, sync);
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -2282,7 +2282,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	trace_mark(kernel_sched_wakeup,
 		"pid %d state %ld ## rq %p task %p rq->curr %p",
 		p->pid, p->state, rq, p, rq->curr);
-	check_preempt_curr(rq, p);
+	check_preempt_curr(rq, p, sync);
 
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
@@ -2417,7 +2417,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	trace_mark(kernel_sched_wakeup_new,
 		"pid %d state %ld ## rq %p task %p rq->curr %p",
 		p->pid, p->state, rq, p, rq->curr);
-	check_preempt_curr(rq, p);
+	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
 		p->sched_class->task_wake_up(rq, p);
@@ -2877,7 +2877,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
 	 * to be always true for them.
 	 */
-	check_preempt_curr(this_rq, p);
+	check_preempt_curr(this_rq, p, 0);
 }
 
 /*
@@ -6007,7 +6007,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
 		activate_task(rq_dest, p, 0);
-		check_preempt_curr(rq_dest, p);
+		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
 	ret = 1;
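Of the check_preempt_curr() call sites updated above, only the try_to_wake_up() path forwards the caller's sync hint; new-task wakeups, load-balancer pulls and task migration all pass 0, so the overlap heuristic below can only fire on a genuine synchronous wakeup.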
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,7 +1331,7 @@ static inline int depth_se(struct sched_entity *se)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1367,6 +1367,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
+	if (sched_feat(WAKEUP_OVERLAP) && sync &&
+			se->avg_overlap < sysctl_sched_migration_cost &&
+			pse->avg_overlap < sysctl_sched_migration_cost) {
+		resched_task(curr);
+		return;
+	}
+
 	/*
 	 * preemption test can be made between sibling entities who are in the
 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
@@ -1649,7 +1656,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p,
 		if (p->prio > oldprio)
 			resched_task(rq->curr);
 	} else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 /*
@@ -1666,7 +1673,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p,
 	if (running)
 		resched_task(rq->curr);
 	else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 /* Account for a task changing its policy or group.
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -11,3 +11,4 @@ SCHED_FEAT(ASYM_GRAN, 1)
 SCHED_FEAT(LB_BIAS, 1)
 SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
+SCHED_FEAT(WAKEUP_OVERLAP, 1)
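Like the other flags in sched_features.h, WAKEUP_OVERLAP defaults to enabled; on kernels built with CONFIG_SCHED_DEBUG it can be flipped at runtime by writing WAKEUP_OVERLAP or NO_WAKEUP_OVERLAP to /sys/kernel/debug/sched_features, which makes it straightforward to measure the OLTP workload with and without the new rule. (The runtime-toggle detail comes from the scheduler's feature-flag convention, not from this commit.)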
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sync)
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
 {
 	resched_task(rq->idle);
 }
@@ -76,7 +76,7 @@ static void switched_to_idle(struct rq *rq, struct task_struct *p,
 	if (running)
 		resched_task(rq->curr);
 	else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 static void prio_changed_idle(struct rq *rq, struct task_struct *p,
@@ -93,7 +93,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 		if (p->prio > oldprio)
 			resched_task(rq->curr);
 	} else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 /*
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -783,7 +783,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
 {
 	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);