Commit 59efa0ba authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Kill sched_class::task_waking to clean up the migration logic

With sched_class::task_waking being called only when we do
set_task_cpu(), we can make sched_class::migrate_task_rq() do the work
and eliminate sched_class::task_waking entirely.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Hunter <ahh@google.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: byungchul.park@lge.com
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b5179ac7
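
The idea behind the patch, in one line: a blocked CFS task keeps an absolute vruntime, so a wakeup that migrates the task must subtract the old runqueue's min_vruntime and let enqueue_entity() add the new one back. A minimal userspace sketch of that renormalization step (hypothetical struct and function names, not the kernel code) could look like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel's cfs_rq and sched_entity. */
struct cfs_rq { uint64_t min_vruntime; };
struct sched_entity { uint64_t vruntime; };

/* Migration of a waking task: drop the old queue's baseline, roughly what
 * migrate_task_rq_fair() now does when p->state == TASK_WAKING. */
static void migrate_waking(struct sched_entity *se, const struct cfs_rq *old_rq)
{
	se->vruntime -= old_rq->min_vruntime;	/* vruntime becomes relative */
}

/* Enqueue on the destination: add the new baseline, roughly what
 * enqueue_entity() does when ENQUEUE_MIGRATED is set. */
static void enqueue_migrated(struct sched_entity *se, const struct cfs_rq *new_rq)
{
	se->vruntime += new_rq->min_vruntime;	/* vruntime is absolute again */
}

int main(void)
{
	struct cfs_rq src = { .min_vruntime = 1000 };
	struct cfs_rq dst = { .min_vruntime = 5000 };
	struct sched_entity se = { .vruntime = 1200 };	/* 200 units past src's baseline */

	migrate_waking(&se, &src);
	enqueue_migrated(&se, &dst);

	/* The task keeps its 200 units of lag relative to the new baseline: 5200. */
	printf("vruntime on dst: %llu\n", (unsigned long long)se.vruntime);
	return 0;
}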
kernel/sched/core.c
@@ -1717,11 +1717,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 	if (p->sched_contributes_to_load)
 		rq->nr_uninterruptible--;
 
-	/*
-	 * If we migrated; we must have called sched_class::task_waking().
-	 */
 	if (wake_flags & WF_MIGRATED)
-		en_flags |= ENQUEUE_WAKING;
+		en_flags |= ENQUEUE_MIGRATED;
 #endif
 
 	ttwu_activate(rq, p, en_flags);
@@ -2049,10 +2046,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
 	if (task_cpu(p) != cpu) {
 		wake_flags |= WF_MIGRATED;
-
-		if (p->sched_class->task_waking)
-			p->sched_class->task_waking(p);
-
 		set_task_cpu(p, cpu);
 	}
 #endif /* CONFIG_SMP */
kernel/sched/fair.c
@@ -3273,7 +3273,7 @@ static inline void check_schedstat_required(void)
  *
  * WAKEUP (remote)
  *
- *	->task_waking_fair()
+ *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
  *	  vruntime -= min_vruntime
  *
  * enqueue
@@ -3292,7 +3292,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update the normalized vruntime before updating min_vruntime
 	 * through calling update_curr().
 	 */
-	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
+	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED))
 		se->vruntime += cfs_rq->min_vruntime;
 
 	/*
@@ -4841,33 +4841,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return 0;
 }
 
-/*
- * Called to migrate a waking task; as blocked tasks retain absolute vruntime
- * the migration needs to deal with this by subtracting the old and adding the
- * new min_vruntime -- the latter is done by enqueue_entity() when placing
- * the task on the new runqueue.
- */
-static void task_waking_fair(struct task_struct *p)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 min_vruntime;
-
-#ifndef CONFIG_64BIT
-	u64 min_vruntime_copy;
-
-	do {
-		min_vruntime_copy = cfs_rq->min_vruntime_copy;
-		smp_rmb();
-		min_vruntime = cfs_rq->min_vruntime;
-	} while (min_vruntime != min_vruntime_copy);
-#else
-	min_vruntime = cfs_rq->min_vruntime;
-#endif
-
-	se->vruntime -= min_vruntime;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
@@ -5402,6 +5375,32 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
  */
 static void migrate_task_rq_fair(struct task_struct *p)
 {
+	/*
+	 * As blocked tasks retain absolute vruntime the migration needs to
+	 * deal with this by subtracting the old and adding the new
+	 * min_vruntime -- the latter is done by enqueue_entity() when placing
+	 * the task on the new runqueue.
+	 */
+	if (p->state == TASK_WAKING) {
+		struct sched_entity *se = &p->se;
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+		u64 min_vruntime;
+
+#ifndef CONFIG_64BIT
+		u64 min_vruntime_copy;
+
+		do {
+			min_vruntime_copy = cfs_rq->min_vruntime_copy;
+			smp_rmb();
+			min_vruntime = cfs_rq->min_vruntime;
+		} while (min_vruntime != min_vruntime_copy);
+#else
+		min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+		se->vruntime -= min_vruntime;
+	}
+
 	/*
 	 * We are supposed to update the task to "current" time, then its up to date
 	 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
@@ -8672,7 +8671,6 @@ const struct sched_class fair_sched_class = {
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
-	.task_waking		= task_waking_fair,
 	.task_dead		= task_dead_fair,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
kernel/sched/sched.h
@@ -1168,7 +1168,7 @@ extern const u32 sched_prio_to_wmult[40];
  *
  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
- * ENQUEUE_WAKING    - sched_class::task_waking was called
+ * ENQUEUE_MIGRATED  - the task was migrated during wakeup
  *
  */
@@ -1183,9 +1183,9 @@ extern const u32 sched_prio_to_wmult[40];
 #define ENQUEUE_HEAD		0x08
 #define ENQUEUE_REPLENISH	0x10
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		0x20
+#define ENQUEUE_MIGRATED	0x20
 #else
-#define ENQUEUE_WAKING		0x00
+#define ENQUEUE_MIGRATED	0x00
 #endif
 
 #define RETRY_TASK		((void *)-1UL)
@@ -1217,7 +1217,6 @@ struct sched_class {
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p);
 
-	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
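
One detail carried over from task_waking_fair() into migrate_task_rq_fair(): min_vruntime is a u64, so on !CONFIG_64BIT builds it is read together with min_vruntime_copy and re-read until the two agree, with smp_rmb() ordering the loads against the writer's paired smp_wmb(). A rough userspace analogue of that retry pattern, using C11 fences in place of the kernel barriers (the names here are made up for illustration), might be:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical stand-in for a runqueue's 64-bit value plus its shadow copy,
 * mirroring cfs_rq->min_vruntime and cfs_rq->min_vruntime_copy. */
struct vrq {
	_Atomic uint64_t min_vruntime;
	_Atomic uint64_t min_vruntime_copy;
};

/* Writer: store the value, then the copy, with a release fence in between
 * (the kernel's writer uses smp_wmb() between the two stores). */
static void vrq_write(struct vrq *q, uint64_t val)
{
	atomic_store_explicit(&q->min_vruntime, val, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&q->min_vruntime_copy, val, memory_order_relaxed);
}

/* Reader: re-read until value and copy agree, the same
 * do { copy; rmb; value; } while (value != copy) shape as in the patch. */
static uint64_t vrq_read(struct vrq *q)
{
	uint64_t v, c;

	do {
		c = atomic_load_explicit(&q->min_vruntime_copy, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		v = atomic_load_explicit(&q->min_vruntime, memory_order_relaxed);
	} while (v != c);

	return v;
}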