Commit fc1892be authored by Peter Zijlstra

sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE

Note that tasks that are kept on the runqueue to burn off negative
lag are not in fact runnable anymore; they'll get dequeued the moment
they get picked.

As such, don't count this time towards runnable.

Thanks to Valentin for spotting I had this backwards initially.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/20240727105030.514088302@infradead.org
parent 54a58a78
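
For context on the PELT side: the per-entity load update (___update_load_sum() via __update_load_avg_se() in kernel/sched/pelt.c) samples se_runnable() to decide whether queued time accrues to the runnable signal, so having it return false for delayed entities is what stops the accounting. Below is a standalone sketch, not kernel code — the struct is reduced to the two fields involved and all locking and PELT machinery is elided — of the rule the patch enforces:

/* Standalone model of the rule this patch enforces: a delayed-dequeue
 * entity still occupies the runqueue (on_rq == 1) but must not count
 * as runnable. */
#include <stdbool.h>
#include <stdio.h>

struct sched_entity {
	int on_rq;		/* entity is still queued on the cfs_rq */
	int sched_delayed;	/* queued only to burn off negative lag */
};

/* Mirrors the !CONFIG_FAIR_GROUP_SCHED variant below, after the patch. */
static long se_runnable(struct sched_entity *se)
{
	if (se->sched_delayed)
		return false;
	return !!se->on_rq;
}

int main(void)
{
	struct sched_entity se = { .on_rq = 1, .sched_delayed = 0 };

	printf("queued, not delayed: runnable=%ld\n", se_runnable(&se)); /* 1 */

	se.sched_delayed = 1;	/* dequeue_entity() defers the dequeue */
	printf("delayed-dequeue:     runnable=%ld\n", se_runnable(&se)); /* 0 */

	se.sched_delayed = 0;	/* requeue_delayed_entity() reactivates it */
	printf("requeued:            runnable=%ld\n", se_runnable(&se)); /* 1 */
	return 0;
}

Note the placement of the update_load_avg(cfs_rq, se, 0) calls in the hunks below: each runs before se->sched_delayed changes, so the averages are brought up to date while the old se_runnable() classification still applies and none of the delayed time is misattributed.
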
kernel/sched/fair.c
@@ -5402,6 +5402,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		    !entity_eligible(cfs_rq, se)) {
 			if (cfs_rq->next == se)
 				cfs_rq->next = NULL;
+			update_load_avg(cfs_rq, se, 0);
 			se->sched_delayed = 1;
 			return false;
 		}
@@ -6841,6 +6842,7 @@ requeue_delayed_entity(struct sched_entity *se)
 		}
 	}
 
+	update_load_avg(cfs_rq, se, 0);
 	se->sched_delayed = 0;
 }
kernel/sched/sched.h
@@ -820,6 +820,9 @@ static inline void se_update_runnable(struct sched_entity *se)
 static inline long se_runnable(struct sched_entity *se)
 {
+	if (se->sched_delayed)
+		return false;
+
 	if (entity_is_task(se))
 		return !!se->on_rq;
 	else
@@ -834,6 +837,9 @@ static inline void se_update_runnable(struct sched_entity *se) { }
 static inline long se_runnable(struct sched_entity *se)
 {
+	if (se->sched_delayed)
+		return false;
+
 	return !!se->on_rq;
 }