Commit 22b612e2 authored by Thomas Gleixner, committed by Peter Zijlstra

cpu/hotplug: Rework sparse_irq locking in bringup_cpu()

There is no harm in holding the sparse_irq lock until the upcoming CPU
completes its startup in cpuhp_online_idle(). This allows the cpu_online()
synchronization to be removed from architecture code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205256.263722880@linutronix.de
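
To illustrate what this enables on the architecture side, here is a minimal,
hypothetical sketch of the kind of wait loop architectures carried before this
rework; arch_kick_secondary() is an invented placeholder and real arch code
varies:

	/*
	 * Hypothetical architecture __cpu_up() before this rework
	 * (sketch only; arch_kick_secondary() is an invented placeholder).
	 */
	int __cpu_up(unsigned int cpu, struct task_struct *idle)
	{
		int ret = arch_kick_secondary(cpu, idle);	/* start the AP */

		if (ret)
			return ret;

		/*
		 * The generic code used to drop the sparse irq lock as soon
		 * as __cpu_up() returned, so the architecture had to
		 * synchronize with the upcoming CPU itself, typically by
		 * spinning until the AP marked itself online.
		 */
		while (!cpu_online(cpu))
			cpu_relax();

		return 0;
	}

With the sparse irq lock now held by the generic code until the AP reaches
cpuhp_online_idle(), such intermediate spin-waits become removable.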
parent c8b7fb09
kernel/cpu.c
@@ -558,7 +558,7 @@ static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
 	return ret;
 }
 
-static int bringup_wait_for_ap(unsigned int cpu)
+static int bringup_wait_for_ap_online(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
@@ -579,15 +579,12 @@ static int bringup_wait_for_ap(unsigned int cpu)
 	 */
 	if (!cpu_smt_allowed(cpu))
 		return -ECANCELED;
-
-	if (st->target <= CPUHP_AP_ONLINE_IDLE)
-		return 0;
-
-	return cpuhp_kick_ap(cpu, st, st->target);
+	return 0;
 }
 
 static int bringup_cpu(unsigned int cpu)
 {
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	struct task_struct *idle = idle_thread_get(cpu);
 	int ret;
 
@@ -600,16 +597,33 @@ static int bringup_cpu(unsigned int cpu)
 	/*
 	 * Some architectures have to walk the irq descriptors to
 	 * setup the vector space for the cpu which comes online.
-	 * Prevent irq alloc/free across the bringup.
+	 *
+	 * Prevent irq alloc/free across the bringup by acquiring the
+	 * sparse irq lock. Hold it until the upcoming CPU completes the
+	 * startup in cpuhp_online_idle() which allows to avoid
+	 * intermediate synchronization points in the architecture code.
 	 */
 	irq_lock_sparse();
 
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
-	irq_unlock_sparse();
 	if (ret)
-		return ret;
-	return bringup_wait_for_ap(cpu);
+		goto out_unlock;
+
+	ret = bringup_wait_for_ap_online(cpu);
+	if (ret)
+		goto out_unlock;
+
+	irq_unlock_sparse();
+
+	if (st->target <= CPUHP_AP_ONLINE_IDLE)
+		return 0;
+
+	return cpuhp_kick_ap(cpu, st, st->target);
+
+out_unlock:
+	irq_unlock_sparse();
+	return ret;
 }
 
 static int finish_cpu(unsigned int cpu)
...
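
For readability, the resulting bringup_cpu() with this patch applied, assembled
from the hunks above (unchanged setup code elided):

	static int bringup_cpu(unsigned int cpu)
	{
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		struct task_struct *idle = idle_thread_get(cpu);
		int ret;

		/* ... unchanged setup elided ... */

		irq_lock_sparse();

		/* Arch-specific enabling code. */
		ret = __cpu_up(cpu, idle);
		if (ret)
			goto out_unlock;

		ret = bringup_wait_for_ap_online(cpu);
		if (ret)
			goto out_unlock;

		irq_unlock_sparse();

		if (st->target <= CPUHP_AP_ONLINE_IDLE)
			return 0;

		return cpuhp_kick_ap(cpu, st, st->target);

	out_unlock:
		irq_unlock_sparse();
		return ret;
	}

Funneling every failure path through out_unlock keeps the sparse irq
lock/unlock strictly paired, while the success path holds the lock across
bringup_wait_for_ap_online() so irq descriptors cannot change under the
architecture's vector setup.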