Commit f2db9382 authored by Denys Vlasenko, committed by Ingo Molnar

x86/asm/entry: Do mass removal of 'ARGOFFSET'

ARGOFFSET is zero now, so removing it changes no code.

A few macros lost their "offset" parameter, since it is now always
zero as well.

No code changes - verified with objdump.
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/8689f937622d9d2db0ab8be82331fa15e4ed4713.1424989793.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0d550836
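Why this is a no-op: any operand of the form SYM-ARGOFFSET(%rsp) with ARGOFFSET defined as 0 assembles to exactly the same bytes as SYM(%rsp). A minimal standalone sketch of the idea (the .set symbols and the 10*8 value stand in for the real pt_regs offset macros and are illustrative only, not taken from the patch):

    .set ARGOFFSET, 0               # the definition this patch removes
    .set RAX, 10*8                  # illustrative pt_regs slot offset
    movq %rax, RAX-ARGOFFSET(%rsp)  # old spelling
    movq %rax, RAX(%rsp)            # new spelling: identical encoding

Assembling both forms and comparing the objdump -d output gives byte-for-byte identical machine code, which is the kind of check the "verified with objdump" note above refers to.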
@@ -88,8 +88,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define RSP 19*8
 #define SS 20*8
-#define ARGOFFSET 0
-
 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
 subq $15*8+\addskip, %rsp
 CFI_ADJUST_CFA_OFFSET 15*8+\addskip
@@ -73,9 +73,9 @@ ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+.macro TRACE_IRQS_IRETQ
 #ifdef CONFIG_TRACE_IRQFLAGS
-bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
+bt $9,EFLAGS(%rsp) /* interrupts off? */
 jnc 1f
 TRACE_IRQS_ON
 1:
@@ -107,8 +107,8 @@ ENDPROC(native_usergs_sysret64)
 call debug_stack_reset
 .endm
-.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
-bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
+.macro TRACE_IRQS_IRETQ_DEBUG
+bt $9,EFLAGS(%rsp) /* interrupts off? */
 jnc 1f
 TRACE_IRQS_ON_DEBUG
 1:
@@ -184,16 +184,16 @@ ENDPROC(native_usergs_sysret64)
 * frame that enables passing a complete pt_regs to a C function.
 */
 .macro DEFAULT_FRAME start=1 offset=0
-XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
-CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
-CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
-CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
-CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
-CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
-CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
-CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
-CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
-CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
+XCPT_FRAME \start, ORIG_RAX+\offset
+CFI_REL_OFFSET rdi, RDI+\offset
+CFI_REL_OFFSET rsi, RSI+\offset
+CFI_REL_OFFSET rdx, RDX+\offset
+CFI_REL_OFFSET rcx, RCX+\offset
+CFI_REL_OFFSET rax, RAX+\offset
+CFI_REL_OFFSET r8, R8+\offset
+CFI_REL_OFFSET r9, R9+\offset
+CFI_REL_OFFSET r10, R10+\offset
+CFI_REL_OFFSET r11, R11+\offset
 CFI_REL_OFFSET rbx, RBX+\offset
 CFI_REL_OFFSET rbp, RBP+\offset
 CFI_REL_OFFSET r12, R12+\offset
@@ -237,13 +237,13 @@ ENTRY(ret_from_fork)
 RESTORE_EXTRA_REGS
-testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+testl $3,CS(%rsp) # from kernel_thread?
 jz 1f
 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
 jnz int_ret_from_sys_call
-RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+RESTORE_TOP_OF_STACK %rdi
 jmp ret_from_sys_call # go to the SYSRET fastpath
 1:
@@ -310,11 +310,11 @@ GLOBAL(system_call_after_swapgs)
 ENABLE_INTERRUPTS(CLBR_NONE)
 ALLOC_PT_GPREGS_ON_STACK 8
 SAVE_C_REGS_EXCEPT_RAX_RCX
-movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-movq_cfi rax,(ORIG_RAX-ARGOFFSET)
-movq %rcx,RIP-ARGOFFSET(%rsp)
-CFI_REL_OFFSET rip,RIP-ARGOFFSET
-testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+movq $-ENOSYS,RAX(%rsp)
+movq_cfi rax,ORIG_RAX
+movq %rcx,RIP(%rsp)
+CFI_REL_OFFSET rip,RIP
+testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP)
 jnz tracesys
 system_call_fastpath:
 #if __SYSCALL_MASK == ~0
@@ -326,13 +326,13 @@ system_call_fastpath:
 ja ret_from_sys_call /* and return regs->ax */
 movq %r10,%rcx
 call *sys_call_table(,%rax,8) # XXX: rip relative
-movq %rax,RAX-ARGOFFSET(%rsp)
+movq %rax,RAX(%rsp)
 /*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
 ret_from_sys_call:
-testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP)
 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
 LOCKDEP_SYS_EXIT
@@ -344,7 +344,7 @@ ret_from_sys_call:
 */
 TRACE_IRQS_ON
 RESTORE_C_REGS_EXCEPT_RCX
-movq RIP-ARGOFFSET(%rsp),%rcx
+movq RIP(%rsp),%rcx
 CFI_REGISTER rip,rcx
 /*CFI_REGISTER rflags,r11*/
 movq PER_CPU_VAR(old_rsp), %rsp
@@ -353,7 +353,7 @@ ret_from_sys_call:
 CFI_RESTORE_STATE
 int_ret_from_sys_call_fixup:
-FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
+FIXUP_TOP_OF_STACK %r11
 jmp int_ret_from_sys_call
 /* Do syscall tracing */
@@ -364,7 +364,7 @@ tracesys:
 test %rax, %rax
 jnz tracesys_phase2 /* if needed, run the slow path */
 RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
-movq ORIG_RAX-ARGOFFSET(%rsp), %rax
+movq ORIG_RAX(%rsp), %rax
 jmp system_call_fastpath /* and return to the fast path */
 tracesys_phase2:
@@ -391,7 +391,7 @@ tracesys_phase2:
 ja int_ret_from_sys_call /* RAX(%rsp) is already set */
 movq %r10,%rcx /* fixup for C */
 call *sys_call_table(,%rax,8)
-movq %rax,RAX-ARGOFFSET(%rsp)
+movq %rax,RAX(%rsp)
 /* Use IRET because user could have changed frame */
 /*
@@ -475,9 +475,9 @@ END(stub_\func)
 ENTRY(\label)
 CFI_STARTPROC
 DEFAULT_FRAME 0, 8 /* offset 8: return address */
-FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
+FIXUP_TOP_OF_STACK %r11, 8
 call \func
-RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
+RESTORE_TOP_OF_STACK %r11, 8
 ret
 CFI_ENDPROC
 END(\label)
@@ -677,7 +677,7 @@ common_interrupt:
 ASM_CLAC
 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
 interrupt do_IRQ
-/* 0(%rsp): old_rsp-ARGOFFSET */
+/* 0(%rsp): old_rsp */
 ret_from_intr:
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF
@@ -687,13 +687,13 @@ ret_from_intr:
 popq %rsi
 CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
 /* return code expects complete pt_regs - adjust rsp accordingly: */
-leaq ARGOFFSET-RBP(%rsi), %rsp
+leaq -RBP(%rsi),%rsp
 CFI_DEF_CFA_REGISTER rsp
-CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
+CFI_ADJUST_CFA_OFFSET RBP
 exit_intr:
 GET_THREAD_INFO(%rcx)
-testl $3,CS-ARGOFFSET(%rsp)
+testl $3,CS(%rsp)
 je retint_kernel
 /* Interrupt came from user space */
@@ -721,8 +721,8 @@ retint_swapgs: /* return to user-space */
 * Try to use SYSRET instead of IRET if we're returning to
 * a completely clean 64-bit userspace context.
 */
-movq (RCX-ARGOFFSET)(%rsp), %rcx
-cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
+movq RCX(%rsp),%rcx
+cmpq %rcx,RIP(%rsp) /* RCX == RIP */
 jne opportunistic_sysret_failed
 /*
@@ -743,11 +743,11 @@ retint_swapgs: /* return to user-space */
 shr $__VIRTUAL_MASK_SHIFT, %rcx
 jnz opportunistic_sysret_failed
-cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
+cmpq $__USER_CS,CS(%rsp) /* CS must match SYSRET */
 jne opportunistic_sysret_failed
-movq (R11-ARGOFFSET)(%rsp), %r11
-cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
+movq R11(%rsp),%r11
+cmpq %r11,EFLAGS(%rsp) /* R11 == RFLAGS */
 jne opportunistic_sysret_failed
 testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
@@ -755,7 +755,7 @@ retint_swapgs: /* return to user-space */
 /* nothing to check for RSP */
-cmpq $__USER_DS,(SS-ARGOFFSET)(%rsp) /* SS must match SYSRET */
+cmpq $__USER_DS,SS(%rsp) /* SS must match SYSRET */
 jne opportunistic_sysret_failed
 /*
@@ -870,7 +870,7 @@ retint_signal:
 ENTRY(retint_kernel)
 cmpl $0,PER_CPU_VAR(__preempt_count)
 jnz retint_restore_args
-bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+bt $9,EFLAGS(%rsp) /* interrupts off? */
 jnc retint_restore_args
 call preempt_schedule_irq
 jmp exit_intr
@@ -1286,11 +1286,11 @@ ENTRY(paranoid_exit)
 TRACE_IRQS_OFF_DEBUG
 testl %ebx,%ebx /* swapgs needed? */
 jnz paranoid_exit_no_swapgs
-TRACE_IRQS_IRETQ 0
+TRACE_IRQS_IRETQ
 SWAPGS_UNSAFE_STACK
 jmp paranoid_exit_restore
 paranoid_exit_no_swapgs:
-TRACE_IRQS_IRETQ_DEBUG 0
+TRACE_IRQS_IRETQ_DEBUG
 paranoid_exit_restore:
 RESTORE_EXTRA_REGS
 RESTORE_C_REGS