Commit 1c3e5d3f authored by Peter Zijlstra, committed by Thomas Gleixner

x86/entry: Make entry_64_compat.S objtool clean

Currently entry_64_compat is exempt from objtool, but with vmlinux
mode there is no hiding it.

Make the following changes to make it pass:

 - change entry_SYSENTER_compat to STT_NOTYPE; it's not a function
   and doesn't have function type stack setup.

 - mark all STT_NOTYPE symbols with UNWIND_HINT_EMPTY; so we do
   validate them and don't treat them as unreachable.

 - don't abuse RSP as a temp register, this confuses objtool
   mightily as it (rightfully) thinks we're doing unspeakable
   things to the stack.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20200505134341.272248024@linutronix.de


parent a7ef9ba9
@@ -11,8 +11,6 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
-OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
-
 CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
 CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
 obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
......
@@ -46,12 +46,14 @@
  * ebp		user stack
  * 0(%ebp)	arg6
  */
-SYM_FUNC_START(entry_SYSENTER_compat)
+SYM_CODE_START(entry_SYSENTER_compat)
+	UNWIND_HINT_EMPTY
 	/* Interrupts are off on entry. */
 	SWAPGS
-	/* We are about to clobber %rsp anyway, clobbering here is OK */
-	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+	pushq	%rax
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	popq	%rax
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -104,6 +106,9 @@ SYM_CODE_START(entry_SYSENTER_compat)
 	xorl	%r14d, %r14d		/* nospec   r14 */
 	pushq	$0			/* pt_regs->r15 = 0 */
 	xorl	%r15d, %r15d		/* nospec   r15 */
+
+	UNWIND_HINT_REGS
+
 	cld
 	/*
@@ -141,7 +146,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
 	popfq
 	jmp	.Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
-SYM_FUNC_END(entry_SYSENTER_compat)
+SYM_CODE_END(entry_SYSENTER_compat)
 /*
  * 32-bit SYSCALL entry.
@@ -191,6 +196,7 @@ SYM_CODE_END(entry_SYSENTER_compat)
  * 0(%esp)	arg6
  */
 SYM_CODE_START(entry_SYSCALL_compat)
+	UNWIND_HINT_EMPTY
 	/* Interrupts are off on entry. */
 	swapgs
@@ -241,6 +247,8 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
 	pushq	$0			/* pt_regs->r15 = 0 */
 	xorl	%r15d, %r15d		/* nospec   r15 */
+
+	UNWIND_HINT_REGS
+
 	movq	%rsp, %rdi
 	call	do_fast_syscall_32
 	/* XEN PV guests always use IRET path */
@@ -328,6 +336,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
  * ebp		arg6
  */
 SYM_CODE_START(entry_INT80_compat)
+	UNWIND_HINT_EMPTY
 	/*
 	 * Interrupts are off on entry.
 	 */
@@ -349,8 +358,11 @@ SYM_CODE_START(entry_INT80_compat)
 	/* Need to switch before accessing the thread stack. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
 	/* In the Xen PV case we already run on the thread stack. */
-	ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+	ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 	pushq	6*8(%rdi)		/* regs->ss */
@@ -389,6 +401,9 @@ SYM_CODE_START(entry_INT80_compat)
 	xorl	%r14d, %r14d		/* nospec   r14 */
 	pushq	%r15			/* pt_regs->r15 */
 	xorl	%r15d, %r15d		/* nospec   r15 */
+
+	UNWIND_HINT_REGS
+
 	cld
 	movq	%rsp, %rdi
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment