Commit 120455c5 authored by Peter Zijlstra's avatar Peter Zijlstra

sched: Fix hotplug vs CPU bandwidth control

Since we now migrate tasks away before DYING, we should also move
bandwidth unthrottle, otherwise we can gain tasks from unthrottle
after we expect all tasks to be gone already.

Also; it looks like the RT balancers don't respect cpu_active() and
instead rely on rq->online in part, complete this. This too requires
we do set_rq_offline() earlier to match the cpu_active() semantics.
(The bigger patch is to convert RT to cpu_active() entirely)

Since set_rq_online() is called from sched_cpu_activate(), place
set_rq_offline() in sched_cpu_deactivate().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102346.639538965@infradead.org
parent 1cf12e08
@@ -6977,6 +6977,8 @@ int sched_cpu_activate(unsigned int cpu)
 int sched_cpu_deactivate(unsigned int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	struct rq_flags rf;
 	int ret;

 	set_cpu_active(cpu, false);
@@ -6991,6 +6993,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 	balance_push_set(cpu, true);

+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		update_rq_clock(rq);
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+
 #ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
@@ -7072,10 +7082,6 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_tick_stop(cpu);

 	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
-	}
 	BUG_ON(rq->nr_running != 1);
 	rq_unlock_irqrestore(rq, &rf);
......
@@ -543,7 +543,7 @@ static int push_dl_task(struct rq *rq);

 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 {
-	return dl_task(prev);
+	return rq->online && dl_task(prev);
 }

 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
......
@@ -265,7 +265,7 @@ static void pull_rt_task(struct rq *this_rq);

 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	return rq->rt.highest_prio.curr > prev->prio;
+	return rq->online && rq->rt.highest_prio.curr > prev->prio;
 }

 static inline int rt_overloaded(struct rq *rq)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment