Commit 8b5a0f95 authored by Thomas Gleixner, committed by Peter Zijlstra

x86/smpboot: Enable split CPU startup

The x86 CPU bringup state currently does AP wake-up, wait for AP to
respond and then release it for full bringup.

It is safe to be split into a wake-up and a separate wait+release
state.

Provide the required functions and enable the split CPU bringup, which
prepares for parallel bringup, where the bringup of the non-boot CPUs takes
two iterations: One to prepare and wake all APs and the second to wait and
release them. Depending on timing this can eliminate the wait time
completely.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205257.133453992@linutronix.de
parent a631be92
...@@ -274,8 +274,8 @@ config X86 ...@@ -274,8 +274,8 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER select HAVE_USER_RETURN_NOTIFIER
select HAVE_GENERIC_VDSO select HAVE_GENERIC_VDSO
select HOTPLUG_CORE_SYNC_FULL if SMP
select HOTPLUG_SMT if SMP select HOTPLUG_SMT if SMP
select HOTPLUG_SPLIT_STARTUP if SMP
select IRQ_FORCED_THREADING select IRQ_FORCED_THREADING
select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK
......
...@@ -40,7 +40,7 @@ struct smp_ops { ...@@ -40,7 +40,7 @@ struct smp_ops {
void (*cleanup_dead_cpu)(unsigned cpu); void (*cleanup_dead_cpu)(unsigned cpu);
void (*poll_sync_state)(void); void (*poll_sync_state)(void);
int (*cpu_up)(unsigned cpu, struct task_struct *tidle); int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle);
int (*cpu_disable)(void); int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu); void (*cpu_die)(unsigned int cpu);
void (*play_dead)(void); void (*play_dead)(void);
...@@ -80,11 +80,6 @@ static inline void smp_cpus_done(unsigned int max_cpus) ...@@ -80,11 +80,6 @@ static inline void smp_cpus_done(unsigned int max_cpus)
smp_ops.smp_cpus_done(max_cpus); smp_ops.smp_cpus_done(max_cpus);
} }
static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
return smp_ops.cpu_up(cpu, tidle);
}
static inline int __cpu_disable(void) static inline int __cpu_disable(void)
{ {
return smp_ops.cpu_disable(); return smp_ops.cpu_disable();
...@@ -124,7 +119,7 @@ void native_smp_prepare_cpus(unsigned int max_cpus); ...@@ -124,7 +119,7 @@ void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void); void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle); int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
int native_cpu_disable(void); int native_cpu_disable(void);
void __noreturn hlt_play_dead(void); void __noreturn hlt_play_dead(void);
void native_play_dead(void); void native_play_dead(void);
......
...@@ -268,7 +268,7 @@ struct smp_ops smp_ops = { ...@@ -268,7 +268,7 @@ struct smp_ops smp_ops = {
#endif #endif
.smp_send_reschedule = native_smp_send_reschedule, .smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up, .kick_ap_alive = native_kick_ap,
.cpu_disable = native_cpu_disable, .cpu_disable = native_cpu_disable,
.play_dead = native_play_dead, .play_dead = native_play_dead,
......
...@@ -1052,7 +1052,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) ...@@ -1052,7 +1052,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
return ret; return ret;
} }
static int native_kick_ap(unsigned int cpu, struct task_struct *tidle) int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
{ {
int apicid = apic->cpu_present_to_apicid(cpu); int apicid = apic->cpu_present_to_apicid(cpu);
int err; int err;
...@@ -1088,15 +1088,15 @@ static int native_kick_ap(unsigned int cpu, struct task_struct *tidle) ...@@ -1088,15 +1088,15 @@ static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
return err; return err;
} }
int native_cpu_up(unsigned int cpu, struct task_struct *tidle) int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
{ {
return native_kick_ap(cpu, tidle); return smp_ops.kick_ap_alive(cpu, tidle);
} }
void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
{ {
/* Cleanup possible dangling ends... */ /* Cleanup possible dangling ends... */
if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset) if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset)
smpboot_restore_warm_reset_vector(); smpboot_restore_warm_reset_vector();
} }
......
...@@ -314,7 +314,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -314,7 +314,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
return 0; return 0;
} }
static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle) static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
{ {
int rc; int rc;
...@@ -438,7 +438,7 @@ static const struct smp_ops xen_smp_ops __initconst = { ...@@ -438,7 +438,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_cpus = xen_pv_smp_prepare_cpus, .smp_prepare_cpus = xen_pv_smp_prepare_cpus,
.smp_cpus_done = xen_smp_cpus_done, .smp_cpus_done = xen_smp_cpus_done,
.cpu_up = xen_pv_cpu_up, .kick_ap_alive = xen_pv_kick_ap,
.cpu_die = xen_pv_cpu_die, .cpu_die = xen_pv_cpu_die,
.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu, .cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
.poll_sync_state = xen_pv_poll_sync_state, .poll_sync_state = xen_pv_poll_sync_state,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment