Commit b5c44773 authored by Peter Zijlstra

sched: Use cpu_dying() to fix balance_push vs hotplug-rollback

Use the new cpu_dying() state to simplify and fix the balance_push()
vs CPU hotplug rollback state.

Specifically, we currently rely on notifiers sched_cpu_dying() /
sched_cpu_activate() to terminate balance_push, however if the
cpu_down() fails when we're past sched_cpu_deactivate(), it should
terminate balance_push at that point and not wait until we hit
sched_cpu_activate().

Similarly, when cpu_up() fails and we're going back down, balance_push
should be active, where it currently is not.

So instead, make sure balance_push is enabled below SCHED_AP_ACTIVE
(when !cpu_active()), and gate its utility with cpu_dying().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/YHgAYef83VQhKdC2@hirez.programming.kicks-ass.net
parent e40f74c5
...@@ -1811,7 +1811,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) ...@@ -1811,7 +1811,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
return cpu_online(cpu); return cpu_online(cpu);
/* Regular kernel threads don't get to stay during offline. */ /* Regular kernel threads don't get to stay during offline. */
if (cpu_rq(cpu)->balance_push) if (cpu_dying(cpu))
return false; return false;
/* But are allowed during online. */ /* But are allowed during online. */
...@@ -7638,6 +7638,9 @@ static DEFINE_PER_CPU(struct cpu_stop_work, push_work); ...@@ -7638,6 +7638,9 @@ static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
/* /*
* Ensure we only run per-cpu kthreads once the CPU goes !active. * Ensure we only run per-cpu kthreads once the CPU goes !active.
*
* This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
* effective when the hotplug motion is down.
*/ */
static void balance_push(struct rq *rq) static void balance_push(struct rq *rq)
{ {
...@@ -7645,11 +7648,18 @@ static void balance_push(struct rq *rq) ...@@ -7645,11 +7648,18 @@ static void balance_push(struct rq *rq)
lockdep_assert_held(&rq->lock); lockdep_assert_held(&rq->lock);
SCHED_WARN_ON(rq->cpu != smp_processor_id()); SCHED_WARN_ON(rq->cpu != smp_processor_id());
/* /*
* Ensure the thing is persistent until balance_push_set(.on = false); * Ensure the thing is persistent until balance_push_set(.on = false);
*/ */
rq->balance_callback = &balance_push_callback; rq->balance_callback = &balance_push_callback;
/*
* Only active while going offline.
*/
if (!cpu_dying(rq->cpu))
return;
/* /*
* Both the cpu-hotplug and stop task are in this case and are * Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process. * required to complete the hotplug process.
...@@ -7703,7 +7713,6 @@ static void balance_push_set(int cpu, bool on) ...@@ -7703,7 +7713,6 @@ static void balance_push_set(int cpu, bool on)
struct rq_flags rf; struct rq_flags rf;
rq_lock_irqsave(rq, &rf); rq_lock_irqsave(rq, &rf);
rq->balance_push = on;
if (on) { if (on) {
WARN_ON_ONCE(rq->balance_callback); WARN_ON_ONCE(rq->balance_callback);
rq->balance_callback = &balance_push_callback; rq->balance_callback = &balance_push_callback;
...@@ -7828,8 +7837,8 @@ int sched_cpu_activate(unsigned int cpu) ...@@ -7828,8 +7837,8 @@ int sched_cpu_activate(unsigned int cpu)
struct rq_flags rf; struct rq_flags rf;
/* /*
* Make sure that when the hotplug state machine does a roll-back * Clear the balance_push callback and prepare to schedule
* we clear balance_push. Ideally that would happen earlier... * regular tasks.
*/ */
balance_push_set(cpu, false); balance_push_set(cpu, false);
...@@ -8014,12 +8023,6 @@ int sched_cpu_dying(unsigned int cpu) ...@@ -8014,12 +8023,6 @@ int sched_cpu_dying(unsigned int cpu)
} }
rq_unlock_irqrestore(rq, &rf); rq_unlock_irqrestore(rq, &rf);
/*
* Now that the CPU is offline, make sure we're welcome
* to new tasks once we come back up.
*/
balance_push_set(cpu, false);
calc_load_migrate(rq); calc_load_migrate(rq);
update_max_interval(); update_max_interval();
hrtick_clear(rq); hrtick_clear(rq);
...@@ -8204,7 +8207,7 @@ void __init sched_init(void) ...@@ -8204,7 +8207,7 @@ void __init sched_init(void)
rq->sd = NULL; rq->sd = NULL;
rq->rd = NULL; rq->rd = NULL;
rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
rq->balance_callback = NULL; rq->balance_callback = &balance_push_callback;
rq->active_balance = 0; rq->active_balance = 0;
rq->next_balance = jiffies; rq->next_balance = jiffies;
rq->push_cpu = 0; rq->push_cpu = 0;
...@@ -8251,6 +8254,7 @@ void __init sched_init(void) ...@@ -8251,6 +8254,7 @@ void __init sched_init(void)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
idle_thread_set_boot_cpu(); idle_thread_set_boot_cpu();
balance_push_set(smp_processor_id(), false);
#endif #endif
init_sched_fair_class(); init_sched_fair_class();
......
...@@ -983,7 +983,6 @@ struct rq { ...@@ -983,7 +983,6 @@ struct rq {
unsigned long cpu_capacity_orig; unsigned long cpu_capacity_orig;
struct callback_head *balance_callback; struct callback_head *balance_callback;
unsigned char balance_push;
unsigned char nohz_idle_balance; unsigned char nohz_idle_balance;
unsigned char idle_balance; unsigned char idle_balance;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment