Commit 8f6be6d8 authored by Brian Gerst, committed by Thomas Gleixner

x86/smpboot: Remove initial_gs

Given its CPU#, each CPU can find its own per-cpu offset, and directly set
GSBASE accordingly. The global variable can be eliminated.
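
A minimal sketch of that idea (not the literal code in this patch):
per_cpu_offset(), wrmsrl() and MSR_GS_BASE are existing kernel interfaces,
and "cpu" stands for the CPU number the boot path already has in hand.

	unsigned long gsbase = per_cpu_offset(cpu);	/* __per_cpu_offset[cpu] */

	/* Program GSBASE directly; no initial_gs intermediary is required. */
	wrmsrl(MSR_GS_BASE, gsbase);
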
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Usama Arif <usama.arif@bytedance.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Usama Arif <usama.arif@bytedance.com>
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20230316222109.1940300-9-usama.arif@bytedance.com
parent c253b640
@@ -59,7 +59,6 @@ extern struct real_mode_header *real_mode_header;
 extern unsigned char real_mode_blob_end[];
 
 extern unsigned long initial_code;
-extern unsigned long initial_gs;
 extern unsigned long initial_stack;
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern unsigned long initial_vc_handler;

@@ -127,7 +127,6 @@ int x86_acpi_suspend_lowlevel(void)
 	 * value is in the actual %rsp register.
 	 */
 	current->thread.sp = (unsigned long)temp_stack + sizeof(temp_stack);
-	initial_gs = per_cpu_offset(smp_processor_id());
 	smpboot_control = smp_processor_id();
 #endif
 	initial_code = (unsigned long)wakeup_long64;

@@ -66,18 +66,10 @@ SYM_CODE_START_NOALIGN(startup_64)
 	leaq	_text(%rip), %rdi
 
-	/*
-	 * initial_gs points to initial fixed_percpu_data struct with storage for
-	 * the stack protector canary. Global pointer fixups are needed at this
-	 * stage, so apply them as is done in fixup_pointer(), and initialize %gs
-	 * such that the canary can be accessed at %gs:40 for subsequent C calls.
-	 */
+	/* Setup GSBASE to allow stack canary access for C code */
 	movl	$MSR_GS_BASE, %ecx
-	movq	initial_gs(%rip), %rax
-	movq	$_text, %rdx
-	subq	%rdx, %rax
-	addq	%rdi, %rax
-	movq	%rax, %rdx
+	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
+	movl	%edx, %eax
 	shrq	$32, %rdx
 	wrmsr
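
A note on the new sequence above: wrmsr consumes the 64-bit MSR value split
across %edx:%eax (high:low), so the address is copied into %eax and its upper
half is shifted down into %rdx before the write. A hedged C equivalent using
the kernel's native_wrmsr() helper (the gsbase variable is illustrative):

	u32 lo = (u32)gsbase;			/* low half  -> %eax */
	u32 hi = (u32)(gsbase >> 32);		/* high half -> %edx */
	native_wrmsr(MSR_GS_BASE, lo, hi);
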
@@ -294,8 +286,11 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	 * the per cpu areas are set up.
 	 */
 	movl	$MSR_GS_BASE,%ecx
-	movl	initial_gs(%rip),%eax
-	movl	initial_gs+4(%rip),%edx
+#ifndef CONFIG_SMP
+	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
+#endif
+	movl	%edx, %eax
+	shrq	$32, %rdx
 	wrmsr
 
 	/* Setup and Load IDT */

@@ -437,7 +432,6 @@ SYM_CODE_END(vc_boot_ghcb)
 	__REFDATA
 	.balign	8
 SYM_DATA(initial_code,	.quad x86_64_start_kernel)
-SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
 #endif

@@ -1059,8 +1059,6 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle);
-#else
-	initial_gs = per_cpu_offset(cpu);
 #endif
 	return 0;
 }