Commit 260598f1 authored by Peter Zijlstra

sched: Split up put_prev_task_balance()

With the goal of pushing put_prev_task() after pick_task() / into
pick_next_task().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224015.943143811@infradead.org
parent 4686cc59
@@ -5841,7 +5841,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 	schedstat_inc(this_rq()->sched_count);
 }
 
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
+static void prev_balance(struct rq *rq, struct task_struct *prev,
 				  struct rq_flags *rf)
 {
 #ifdef CONFIG_SMP
@@ -5860,8 +5860,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 	}
 #endif
 
-	put_prev_task(rq, prev);
-
 	/*
 	 * We've updated @prev and no longer need the server link, clear it.
 	 * Must be done before ->pick_next_task() because that can (re)set
@@ -5917,7 +5915,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 restart:
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
+	put_prev_task(rq, prev);
 
 	for_each_class(class) {
 		p = class->pick_next_task(rq);
@@ -6017,7 +6016,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		goto out;
 	}
 
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
+	put_prev_task(rq, prev);
 
 	smt_mask = cpu_smt_mask(cpu);
 	need_sync = !!rq->core->core_cookie;
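In outline, the change to kernel/sched/core.c only moves one call: prev_balance() keeps the per-class balance pass that put_prev_task_balance() used to do, while the two pick paths (__pick_next_task() and the core-scheduling pick_next_task()) now call put_prev_task() themselves, right before iterating the scheduling classes. Keeping the ordering visible at the call sites is what lets a follow-up push put_prev_task() after pick_task() / into pick_next_task() without touching the balance helper again. The standalone C sketch below only models that call ordering; the struct definitions, balance_pass() and put_prev_task_stub() are stand-ins for illustration, not kernel code.

/*
 * Sketch of the new call ordering (stubbed types, not the kernel code):
 * prev_balance() only runs the balance pass, and the pick path invokes
 * put_prev_task() itself before picking the next task.
 */
#include <stdio.h>

struct rq { int cpu; };
struct task_struct { const char *comm; };
struct rq_flags { int flags; };

/* Stand-in for the sched_class balance callbacks run by prev_balance(). */
static void balance_pass(struct rq *rq, struct task_struct *prev,
			 struct rq_flags *rf)
{
	(void)rf;	/* unused in this sketch */
	printf("balance pass on cpu%d for %s\n", rq->cpu, prev->comm);
}

/* Stand-in for put_prev_task(). */
static void put_prev_task_stub(struct rq *rq, struct task_struct *prev)
{
	(void)rq;
	printf("put_prev_task(%s)\n", prev->comm);
}

/* After this commit the helper is balance-only... */
static void prev_balance(struct rq *rq, struct task_struct *prev,
			 struct rq_flags *rf)
{
	balance_pass(rq, prev, rf);
}

/* ...and the pick path does both steps explicitly, in this order. */
static void pick_path(struct rq *rq, struct task_struct *prev,
		      struct rq_flags *rf)
{
	prev_balance(rq, prev, rf);
	put_prev_task_stub(rq, prev);
	/* for_each_class(class) { class->pick_next_task(rq); ... } */
}

int main(void)
{
	struct rq rq = { .cpu = 0 };
	struct task_struct prev = { .comm = "prev-task" };
	struct rq_flags rf = { .flags = 0 };

	pick_path(&rq, &prev, &rf);
	return 0;
}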