Commit 7dc603c9 authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Fix PELT integrity for new tasks

Vincent and Yuyang found another few scenarios in which entity
tracking goes wobbly.

The scenarios are basically due to the fact that new tasks are not
immediately attached and thereby differ from the normal situation -- a
task is always attached to a cfs_rq load average (such that it
includes its blocked contribution) and is explicitly
detached/attached on migration to another cfs_rq.
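
To make the invariant concrete, here is a small stand-alone sketch (toy
user-space code, not kernel code; the toy_* names are made up for
illustration): an entity's contribution lives in exactly one cfs_rq
average, and a migration pairs exactly one detach with one attach.

  #include <assert.h>
  #include <stdio.h>

  struct toy_cfs_rq { long load_avg; };               /* stand-in for a cfs_rq       */
  struct toy_se     { long load_avg; int attached; }; /* stand-in for a sched_entity */

  static void toy_attach(struct toy_cfs_rq *cfs_rq, struct toy_se *se)
  {
          assert(!se->attached);          /* attaching twice would double-count */
          cfs_rq->load_avg += se->load_avg;
          se->attached = 1;
  }

  static void toy_detach(struct toy_cfs_rq *cfs_rq, struct toy_se *se)
  {
          assert(se->attached);           /* must have been attached before */
          cfs_rq->load_avg -= se->load_avg;
          se->attached = 0;
  }

  int main(void)
  {
          struct toy_cfs_rq src = { 0 }, dst = { 0 };
          struct toy_se p = { .load_avg = 100, .attached = 0 };

          toy_attach(&src, &p);   /* task accounted on its first cfs_rq       */
          toy_detach(&src, &p);   /* migration: leave the source cfs_rq ...   */
          toy_attach(&dst, &p);   /* ... and get accounted on the destination */

          printf("src=%ld dst=%ld\n", src.load_avg, dst.load_avg);  /* src=0 dst=100 */
          return 0;
  }

Scenario 1 below trips the first assert (attach while already attached);
scenario 2 additionally trips the second (detach of something that was
never attached).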

Scenario 1: switch to fair class

  p->sched_class = fair_class;
  if (queued)
    enqueue_task(p);
      ...
        enqueue_entity()
	  enqueue_entity_load_avg()
	    migrated = !sa->last_update_time (true)
	    if (migrated)
	      attach_entity_load_avg()
  check_class_changed()
    switched_from() (!fair)
    switched_to()   (fair)
      switched_to_fair()
        attach_entity_load_avg()

If @p is a new task that hasn't been fair before, it will have
!last_update_time and, per the above, end up in
attach_entity_load_avg() _twice_.
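
With made-up numbers (attach_entity_load_avg() folds the entity's
averages into the cfs_rq's averages), the double attach means the new
task gets counted twice; a minimal sketch, assuming hypothetical values:

  #include <stdio.h>

  int main(void)
  {
          long cfs_rq_util   = 300;   /* cfs_rq utilization before the fork (made up) */
          long new_task_util = 128;   /* util seeded for the new task (made up)       */

          /* enqueue_entity_load_avg(): !last_update_time -> attach */
          cfs_rq_util += new_task_util;
          /* switched_to_fair() -> attach_entity_load_avg() again   */
          cfs_rq_util += new_task_util;

          /* prints 556 instead of 428: the new task is counted twice */
          printf("cfs_rq util_avg = %ld\n", cfs_rq_util);
          return 0;
  }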

Scenario 2: change between cgroups

  sched_move_group(p)
    if (queued)
      dequeue_task()
    task_move_group_fair()
      detach_task_cfs_rq()
        detach_entity_load_avg()
      set_task_rq()
      attach_task_cfs_rq()
        attach_entity_load_avg()
    if (queued)
      enqueue_task();
        ...
          enqueue_entity()
	    enqueue_entity_load_avg()
	      migrated = !sa->last_update_time (true)
	      if (migrated)
	        attach_entity_load_avg()

As with scenario 1, if @p is a new task, it will have
!last_update_time and we'll end up in attach_entity_load_avg()
_twice_.

Furthermore, notice how we do a detach_entity_load_avg() on something
that wasn't attached to begin with.
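
Again with made-up numbers, and assuming the subtraction is clamped at
zero (as the kernel does for these averages), the spurious detach eats
part of the contribution of tasks that really are attached:

  #include <stdio.h>

  /* clamped subtraction, mirroring how the cfs_rq averages are kept non-negative */
  static long sub_clamped(long a, long b) { return a > b ? a - b : 0; }

  int main(void)
  {
          long cfs_rq_load   = 400;   /* load of tasks actually attached (made up) */
          long new_task_load = 150;   /* new task's seeded load, never attached    */

          /* task_move_group_fair() -> detach_entity_load_avg() on the new task */
          cfs_rq_load = sub_clamped(cfs_rq_load, new_task_load);

          /* prints 250: part of the other tasks' contribution has vanished */
          printf("cfs_rq load_avg = %ld\n", cfs_rq_load);
          return 0;
  }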

As stated above, the problem is that the new task isn't yet attached
to the load tracking and thereby violates the invariant assumption.

This patch remedies that by ensuring a new task is properly attached
to the load tracking on creation, through
post_init_entity_util_avg().

Of course, this isn't quite as straightforward as one might think:
the task is hashed before we call wake_up_new_task() and can thus
already be poked at (for instance, moved between cgroups). We avoid
this by adding TASK_NEW and teaching cpu_cgroup_can_attach() to
refuse such tasks.

Reported-by: Yuyang Du <yuyang.du@intel.com>
Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ea86cb4b
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -219,9 +219,10 @@ extern void proc_sched_set_task(struct task_struct *p);
 #define TASK_WAKING		256
 #define TASK_PARKED		512
 #define TASK_NOLOAD		1024
-#define TASK_STATE_MAX		2048
+#define TASK_NEW		2048
+#define TASK_STATE_MAX		4096
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2342,11 +2342,11 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	__sched_fork(clone_flags, p);
 	/*
-	 * We mark the process as running here. This guarantees that
+	 * We mark the process as NEW here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
 	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	p->state = TASK_RUNNING;
+	p->state = TASK_NEW;
 
 	/*
 	 * Make sure we do not leak PI boosting priority to the child.
@@ -2383,6 +2383,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->sched_class = &fair_sched_class;
 	}
 
+	init_entity_runnable_average(&p->se);
+
 	/*
 	 * The child is not yet in the pid-hash so no cgroup attach races,
 	 * and the cgroup is pinned to this child due to cgroup_fork()
@@ -2529,9 +2531,8 @@ void wake_up_new_task(struct task_struct *p)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	/* Initialize new task's runnable average */
-	init_entity_runnable_average(&p->se);
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
@@ -8237,6 +8238,7 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct cgroup_subsys_state *css;
+	int ret = 0;
 
 	cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -8247,8 +8249,24 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 		if (task->sched_class != &fair_sched_class)
 			return -EINVAL;
 #endif
+		/*
+		 * Serialize against wake_up_new_task() such that if its
+		 * running, we're sure to observe its full state.
+		 */
+		raw_spin_lock_irq(&task->pi_lock);
+		/*
+		 * Avoid calling sched_move_task() before wake_up_new_task()
+		 * has happened. This would lead to problems with PELT, due to
+		 * move wanting to detach+attach while we're not attached yet.
+		 */
+		if (task->state == TASK_NEW)
+			ret = -EINVAL;
+		raw_spin_unlock_irq(&task->pi_lock);
+
+		if (ret)
+			break;
 	}
-	return 0;
+	return ret;
 }
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,6 +690,10 @@ void init_entity_runnable_average(struct sched_entity *se)
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
  * based on the cfs_rq's current util_avg:
@@ -720,6 +724,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct sched_avg *sa = &se->avg;
 	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+	u64 now = cfs_rq_clock_task(cfs_rq);
 
 	if (cap > 0) {
 		if (cfs_rq->avg.util_avg != 0) {
@@ -733,16 +738,37 @@
 		}
 		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
 	}
+
+	if (entity_is_task(se)) {
+		struct task_struct *p = task_of(se);
+		if (p->sched_class != &fair_sched_class) {
+			/*
+			 * For !fair tasks do:
+			 *
+			update_cfs_rq_load_avg(now, cfs_rq, false);
+			attach_entity_load_avg(cfs_rq, se);
+			switched_from_fair(rq, p);
+			 *
+			 * such that the next switched_to_fair() has the
+			 * expected state.
+			 */
+			se->avg.last_update_time = now;
+			return;
+		}
+	}
+
+	update_cfs_rq_load_avg(now, cfs_rq, false);
+	attach_entity_load_avg(cfs_rq, se);
 }
 
-#else
+#else /* !CONFIG_SMP */
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 void post_init_entity_util_avg(struct sched_entity *se)
 {
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * Update the current task's runtime statistics.
@@ -2840,8 +2866,6 @@ void set_task_rq_fair(struct sched_entity *se,
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
@@ -2951,6 +2975,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	/*
 	 * If we got migrated (either between CPUs or between cgroups) we'll
 	 * have aged the average right before clearing @last_update_time.
+	 *
+	 * Or we're fresh through post_init_entity_util_avg().
 	 */
 	if (se->avg.last_update_time) {
 		__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
@@ -3056,11 +3082,14 @@ void remove_entity_load_avg(struct sched_entity *se)
 	u64 last_update_time;
 
 	/*
-	 * Newly created task or never used group entity should not be removed
-	 * from its (source) cfs_rq
+	 * tasks cannot exit without having gone through wake_up_new_task() ->
+	 * post_init_entity_util_avg() which will have added things to the
+	 * cfs_rq, so we can remove unconditionally.
+	 *
+	 * Similarly for groups, they will have passed through
+	 * post_init_entity_util_avg() before unregister_sched_fair_group()
+	 * calls this.
 	 */
-	if (se->avg.last_update_time == 0)
-		return;
 
 	last_update_time = cfs_rq_last_update_time(cfs_rq);