Commit 131484c8 authored by Ingo Molnar

x86/debug: Remove perpetually broken, unmaintainable dwarf annotations

So the dwarf2 annotations in low-level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security-sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf-driven stack unwinding has caused
problems in the past, so it's out of tree, and the upstream
kernel only uses the much more robust frame-pointer based
stack unwinding method.

In addition to that there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps them correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
with the following conditions:

 - it should be maximally readable, and maximally low-key to
   'ordinary' code reading and maintenance.

 - find a build-time method to insert dwarf annotations
   automatically in the most common cases, for push/pop
   instructions that manipulate the stack pointer. This could
   be done for example via a preprocessing step that just
   looks for common patterns - plus special annotations for
   the few cases where we want to depart from the default.
   We have hundreds of CFI annotations, so automating most of
   that makes sense (see the illustration after this list).

 - it should come with build tooling checks that ensure that
   CFI annotations are sensible. We've seen such efforts from
   the framepointer side, and there's no reason it couldn't be
   done on the dwarf side.
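
To illustrate the boilerplate such an automated pass would have to
regenerate (taken from the ia32entry diff below), a single
hand-annotated push used to expand, via the dwarf2.h macros, into
the instruction plus two CFI directives:

      pushq_cfi_reg rdi        /* pt_regs->di */

      /* ...expanded by dwarf2.h to: */
      pushq  %rdi
      CFI_ADJUST_CFA_OFFSET 8
      CFI_REL_OFFSET rdi, 0

After this patch only the plain instruction remains:

      pushq  %rdi              /* pt_regs->di */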

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cdeb6048
...@@ -149,12 +149,6 @@ endif ...@@ -149,12 +149,6 @@ endif
sp-$(CONFIG_X86_32) := esp sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp sp-$(CONFIG_X86_64) := rsp
# do binutils support CFI?
cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
# does binutils support specific instructions? # does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
...@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1) ...@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
LDFLAGS := -m elf_$(UTS_MACHINE) LDFLAGS := -m elf_$(UTS_MACHINE)
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
* Copyright 2000-2002 Andi Kleen, SuSE Labs. * Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/ */
#include <asm/dwarf2.h>
#include <asm/calling.h> #include <asm/calling.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/current.h> #include <asm/current.h>
...@@ -60,17 +59,6 @@ ...@@ -60,17 +59,6 @@
movl %eax,%eax /* zero extension */ movl %eax,%eax /* zero extension */
.endm .endm
.macro CFI_STARTPROC32 simple
CFI_STARTPROC \simple
CFI_UNDEFINED r8
CFI_UNDEFINED r9
CFI_UNDEFINED r10
CFI_UNDEFINED r11
CFI_UNDEFINED r12
CFI_UNDEFINED r13
CFI_UNDEFINED r14
CFI_UNDEFINED r15
.endm
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32) ENTRY(native_usergs_sysret32)
...@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32) ...@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32)
* with the int 0x80 path. * with the int 0x80 path.
*/ */
ENTRY(ia32_sysenter_target) ENTRY(ia32_sysenter_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target) ...@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target)
movl %eax, %eax movl %eax, %eax
movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
CFI_REGISTER rip,r10
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq_cfi $__USER32_DS /* pt_regs->ss */ pushq $__USER32_DS /* pt_regs->ss */
pushq_cfi %rbp /* pt_regs->sp */ pushq %rbp /* pt_regs->sp */
CFI_REL_OFFSET rsp,0 pushfq /* pt_regs->flags */
pushfq_cfi /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */
pushq_cfi $__USER32_CS /* pt_regs->cs */ pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */ pushq %rax /* pt_regs->orig_ax */
CFI_REL_OFFSET rip,0 pushq %rdi /* pt_regs->di */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rcx /* pt_regs->cx */
pushq_cfi_reg rdx /* pt_regs->dx */ pushq $-ENOSYS /* pt_regs->ax */
pushq_cfi_reg rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */
cld cld
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
/* /*
* no need to do an access_ok check here because rbp has been * no need to do an access_ok check here because rbp has been
...@@ -161,8 +140,8 @@ sysenter_flags_fixed: ...@@ -161,8 +140,8 @@ sysenter_flags_fixed:
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz sysenter_tracesys jnz sysenter_tracesys
sysenter_do_call: sysenter_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */ /* 32bit syscall -> 64bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */ movl %edi,%r8d /* arg5 */
...@@ -193,14 +172,12 @@ sysexit_from_sys_call: ...@@ -193,14 +172,12 @@ sysexit_from_sys_call:
*/ */
andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp),%ecx /* User %eip */ movl RIP(%rsp),%ecx /* User %eip */
CFI_REGISTER rip,rcx
RESTORE_RSI_RDI RESTORE_RSI_RDI
xorl %edx,%edx /* avoid info leaks */ xorl %edx,%edx /* avoid info leaks */
xorq %r8,%r8 xorq %r8,%r8
xorq %r9,%r9 xorq %r9,%r9
xorq %r10,%r10 xorq %r10,%r10
movl EFLAGS(%rsp),%r11d /* User eflags */ movl EFLAGS(%rsp),%r11d /* User eflags */
/*CFI_RESTORE rflags*/
TRACE_IRQS_ON TRACE_IRQS_ON
/* /*
...@@ -231,8 +208,6 @@ sysexit_from_sys_call: ...@@ -231,8 +208,6 @@ sysexit_from_sys_call:
*/ */
USERGS_SYSRET32 USERGS_SYSRET32
CFI_RESTORE_STATE
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
.macro auditsys_entry_common .macro auditsys_entry_common
movl %esi,%r8d /* 5th arg: 4th syscall arg */ movl %esi,%r8d /* 5th arg: 4th syscall arg */
...@@ -282,8 +257,8 @@ sysexit_audit: ...@@ -282,8 +257,8 @@ sysexit_audit:
#endif #endif
sysenter_fix_flags: sysenter_fix_flags:
pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
popfq_cfi popfq
jmp sysenter_flags_fixed jmp sysenter_flags_fixed
sysenter_tracesys: sysenter_tracesys:
...@@ -298,7 +273,6 @@ sysenter_tracesys: ...@@ -298,7 +273,6 @@ sysenter_tracesys:
LOAD_ARGS32 /* reload args from stack in case ptrace changed it */ LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
jmp sysenter_do_call jmp sysenter_do_call
CFI_ENDPROC
ENDPROC(ia32_sysenter_target) ENDPROC(ia32_sysenter_target)
/* /*
...@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target) ...@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target)
* with the int 0x80 path. * with the int 0x80 path.
*/ */
ENTRY(ia32_cstar_target) ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target) ...@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target)
*/ */
SWAPGS_UNSAFE_STACK SWAPGS_UNSAFE_STACK
movl %esp,%r8d movl %esp,%r8d
CFI_REGISTER rsp,r8
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
...@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target) ...@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target)
movl %eax,%eax movl %eax,%eax
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq_cfi $__USER32_DS /* pt_regs->ss */ pushq $__USER32_DS /* pt_regs->ss */
pushq_cfi %r8 /* pt_regs->sp */ pushq %r8 /* pt_regs->sp */
CFI_REL_OFFSET rsp,0 pushq %r11 /* pt_regs->flags */
pushq_cfi %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */
pushq_cfi $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */
pushq_cfi %rcx /* pt_regs->ip */ pushq %rax /* pt_regs->orig_ax */
CFI_REL_OFFSET rip,0 pushq %rdi /* pt_regs->di */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rbp /* pt_regs->cx */
pushq_cfi_reg rdx /* pt_regs->dx */
pushq_cfi_reg rbp /* pt_regs->cx */
movl %ebp,%ecx movl %ebp,%ecx
pushq_cfi $-ENOSYS /* pt_regs->ax */ pushq $-ENOSYS /* pt_regs->ax */
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
/* /*
* no need to do an access_ok check here because r8 has been * no need to do an access_ok check here because r8 has been
...@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target) ...@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target)
ASM_CLAC ASM_CLAC
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz cstar_tracesys jnz cstar_tracesys
cstar_do_call: cstar_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */ /* 32bit syscall -> 64bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */ movl %edi,%r8d /* arg5 */
...@@ -403,15 +367,12 @@ sysretl_from_sys_call: ...@@ -403,15 +367,12 @@ sysretl_from_sys_call:
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
RESTORE_RSI_RDI_RDX RESTORE_RSI_RDI_RDX
movl RIP(%rsp),%ecx movl RIP(%rsp),%ecx
CFI_REGISTER rip,rcx
movl EFLAGS(%rsp),%r11d movl EFLAGS(%rsp),%r11d
/*CFI_REGISTER rflags,r11*/
xorq %r10,%r10 xorq %r10,%r10
xorq %r9,%r9 xorq %r9,%r9
xorq %r8,%r8 xorq %r8,%r8
TRACE_IRQS_ON TRACE_IRQS_ON
movl RSP(%rsp),%esp movl RSP(%rsp),%esp
CFI_RESTORE rsp
/* /*
* 64bit->32bit SYSRET restores eip from ecx, * 64bit->32bit SYSRET restores eip from ecx,
* eflags from r11 (but RF and VM bits are forced to 0), * eflags from r11 (but RF and VM bits are forced to 0),
...@@ -430,7 +391,6 @@ sysretl_from_sys_call: ...@@ -430,7 +391,6 @@ sysretl_from_sys_call:
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
cstar_auditsys: cstar_auditsys:
CFI_RESTORE_STATE
movl %r9d,R9(%rsp) /* register to be clobbered by call */ movl %r9d,R9(%rsp) /* register to be clobbered by call */
auditsys_entry_common auditsys_entry_common
movl R9(%rsp),%r9d /* reload 6th syscall arg */ movl R9(%rsp),%r9d /* reload 6th syscall arg */
...@@ -460,7 +420,6 @@ ia32_badarg: ...@@ -460,7 +420,6 @@ ia32_badarg:
ASM_CLAC ASM_CLAC
movq $-EFAULT,%rax movq $-EFAULT,%rax
jmp ia32_sysret jmp ia32_sysret
CFI_ENDPROC
/* /*
* Emulated IA32 system calls via int 0x80. * Emulated IA32 system calls via int 0x80.
...@@ -484,15 +443,6 @@ ia32_badarg: ...@@ -484,15 +443,6 @@ ia32_badarg:
*/ */
ENTRY(ia32_syscall) ENTRY(ia32_syscall)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,5*8
/*CFI_REL_OFFSET ss,4*8 */
CFI_REL_OFFSET rsp,3*8
/*CFI_REL_OFFSET rflags,2*8 */
/*CFI_REL_OFFSET cs,1*8 */
CFI_REL_OFFSET rip,0*8
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -506,15 +456,14 @@ ENTRY(ia32_syscall) ...@@ -506,15 +456,14 @@ ENTRY(ia32_syscall)
movl %eax,%eax movl %eax,%eax
/* Construct struct pt_regs on stack (iret frame is already on stack) */ /* Construct struct pt_regs on stack (iret frame is already on stack) */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rax /* pt_regs->orig_ax */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rdi /* pt_regs->di */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rdx /* pt_regs->dx */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rcx /* pt_regs->cx */ pushq %rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */ pushq $-ENOSYS /* pt_regs->ax */
cld cld
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
...@@ -544,7 +493,6 @@ ia32_tracesys: ...@@ -544,7 +493,6 @@ ia32_tracesys:
LOAD_ARGS32 /* reload args from stack in case ptrace changed it */ LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
jmp ia32_do_call jmp ia32_do_call
CFI_ENDPROC
END(ia32_syscall) END(ia32_syscall)
.macro PTREGSCALL label, func .macro PTREGSCALL label, func
...@@ -554,8 +502,6 @@ GLOBAL(\label) ...@@ -554,8 +502,6 @@ GLOBAL(\label)
jmp ia32_ptregs_common jmp ia32_ptregs_common
.endm .endm
CFI_STARTPROC32
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
PTREGSCALL stub32_sigreturn, sys32_sigreturn PTREGSCALL stub32_sigreturn, sys32_sigreturn
PTREGSCALL stub32_fork, sys_fork PTREGSCALL stub32_fork, sys_fork
...@@ -569,23 +515,8 @@ GLOBAL(stub32_clone) ...@@ -569,23 +515,8 @@ GLOBAL(stub32_clone)
ALIGN ALIGN
ia32_ptregs_common: ia32_ptregs_common:
CFI_ENDPROC
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SIZEOF_PTREGS
CFI_REL_OFFSET rax,RAX
CFI_REL_OFFSET rcx,RCX
CFI_REL_OFFSET rdx,RDX
CFI_REL_OFFSET rsi,RSI
CFI_REL_OFFSET rdi,RDI
CFI_REL_OFFSET rip,RIP
/* CFI_REL_OFFSET cs,CS*/
/* CFI_REL_OFFSET rflags,EFLAGS*/
CFI_REL_OFFSET rsp,RSP
/* CFI_REL_OFFSET ss,SS*/
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
call *%rax call *%rax
RESTORE_EXTRA_REGS 8 RESTORE_EXTRA_REGS 8
ret ret
CFI_ENDPROC
END(ia32_ptregs_common) END(ia32_ptregs_common)
...@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
#include <asm/dwarf2.h>
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
/* /*
...@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with
.macro ALLOC_PT_GPREGS_ON_STACK addskip=0 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
subq $15*8+\addskip, %rsp subq $15*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 15*8+\addskip
.endm .endm
.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
.if \r11 .if \r11
movq_cfi r11, 6*8+\offset movq %r11, 6*8+\offset(%rsp)
.endif .endif
.if \r8910 .if \r8910
movq_cfi r10, 7*8+\offset movq %r10, 7*8+\offset(%rsp)
movq_cfi r9, 8*8+\offset movq %r9, 8*8+\offset(%rsp)
movq_cfi r8, 9*8+\offset movq %r8, 9*8+\offset(%rsp)
.endif .endif
.if \rax .if \rax
movq_cfi rax, 10*8+\offset movq %rax, 10*8+\offset(%rsp)
.endif .endif
.if \rcx .if \rcx
movq_cfi rcx, 11*8+\offset movq %rcx, 11*8+\offset(%rsp)
.endif .endif
movq_cfi rdx, 12*8+\offset movq %rdx, 12*8+\offset(%rsp)
movq_cfi rsi, 13*8+\offset movq %rsi, 13*8+\offset(%rsp)
movq_cfi rdi, 14*8+\offset movq %rdi, 14*8+\offset(%rsp)
.endm .endm
.macro SAVE_C_REGS offset=0 .macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
...@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
.endm .endm
.macro SAVE_EXTRA_REGS offset=0 .macro SAVE_EXTRA_REGS offset=0
movq_cfi r15, 0*8+\offset movq %r15, 0*8+\offset(%rsp)
movq_cfi r14, 1*8+\offset movq %r14, 1*8+\offset(%rsp)
movq_cfi r13, 2*8+\offset movq %r13, 2*8+\offset(%rsp)
movq_cfi r12, 3*8+\offset movq %r12, 3*8+\offset(%rsp)
movq_cfi rbp, 4*8+\offset movq %rbp, 4*8+\offset(%rsp)
movq_cfi rbx, 5*8+\offset movq %rbx, 5*8+\offset(%rsp)
.endm .endm
.macro SAVE_EXTRA_REGS_RBP offset=0 .macro SAVE_EXTRA_REGS_RBP offset=0
movq_cfi rbp, 4*8+\offset movq %rbp, 4*8+\offset(%rsp)
.endm .endm
.macro RESTORE_EXTRA_REGS offset=0 .macro RESTORE_EXTRA_REGS offset=0
movq_cfi_restore 0*8+\offset, r15 movq 0*8+\offset(%rsp), %r15
movq_cfi_restore 1*8+\offset, r14 movq 1*8+\offset(%rsp), %r14
movq_cfi_restore 2*8+\offset, r13 movq 2*8+\offset(%rsp), %r13
movq_cfi_restore 3*8+\offset, r12 movq 3*8+\offset(%rsp), %r12
movq_cfi_restore 4*8+\offset, rbp movq 4*8+\offset(%rsp), %rbp
movq_cfi_restore 5*8+\offset, rbx movq 5*8+\offset(%rsp), %rbx
.endm .endm
.macro ZERO_EXTRA_REGS .macro ZERO_EXTRA_REGS
...@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11 .if \rstor_r11
movq_cfi_restore 6*8, r11 movq 6*8(%rsp), %r11
.endif .endif
.if \rstor_r8910 .if \rstor_r8910
movq_cfi_restore 7*8, r10 movq 7*8(%rsp), %r10
movq_cfi_restore 8*8, r9 movq 8*8(%rsp), %r9
movq_cfi_restore 9*8, r8 movq 9*8(%rsp), %r8
.endif .endif
.if \rstor_rax .if \rstor_rax
movq_cfi_restore 10*8, rax movq 10*8(%rsp), %rax
.endif .endif
.if \rstor_rcx .if \rstor_rcx
movq_cfi_restore 11*8, rcx movq 11*8(%rsp), %rcx
.endif .endif
.if \rstor_rdx .if \rstor_rdx
movq_cfi_restore 12*8, rdx movq 12*8(%rsp), %rdx
.endif .endif
movq_cfi_restore 13*8, rsi movq 13*8(%rsp), %rsi
movq_cfi_restore 14*8, rdi movq 14*8(%rsp), %rdi
.endm .endm
.macro RESTORE_C_REGS .macro RESTORE_C_REGS
RESTORE_C_REGS_HELPER 1,1,1,1,1 RESTORE_C_REGS_HELPER 1,1,1,1,1
...@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with
.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
addq $15*8+\addskip, %rsp addq $15*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
.endm .endm
.macro icebp .macro icebp
...@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
.macro SAVE_ALL .macro SAVE_ALL
pushl_cfi_reg eax pushl %eax
pushl_cfi_reg ebp pushl %ebp
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg esi pushl %esi
pushl_cfi_reg edx pushl %edx
pushl_cfi_reg ecx pushl %ecx
pushl_cfi_reg ebx pushl %ebx
.endm .endm
.macro RESTORE_ALL .macro RESTORE_ALL
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ecx popl %ecx
popl_cfi_reg edx popl %edx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebp popl %ebp
popl_cfi_reg eax popl %eax
.endm .endm
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
......
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
* Macros for dwarf2 CFI unwind table entries.
* See "as.info" for details on these pseudo ops. Unfortunately
* they are only supported in very new binutils, so define them
* away for older version.
*/
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_ESCAPE .cfi_escape
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif
#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* unwinding at runtime. So only the offline DWARF information is
* useful to anyone. Note we should not use this directive if this
* file is used in the vDSO assembly, or if vmlinux.lds.S gets
* changed so it doesn't discard .eh_frame.
*/
.cfi_sections .debug_frame
#endif
#else
/*
* Due to the structure of pre-exisiting code, don't use assembler line
* comment character # to ignore the arguments. Instead, use a dummy macro.
*/
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
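/*
 * For example, the putuser code (further down in this diff) builds a
 * compound statement out of one of these names:
 *
 *	#define ENTER	CFI_STARTPROC ; \
 *			GET_THREAD_INFO(%_ASM_BX)
 *
 * Had CFI_STARTPROC been defined to the '#' comment character, the
 * expansion would comment out the GET_THREAD_INFO on the same
 * (continued) line; the empty cfi_ignore macro instead swallows its
 * arguments and emits nothing.
 */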
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_ESCAPE cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#endif
/*
* An attempt to make CFI annotations more or less
* correct and shorter. It is implied that you know
* what you're doing if you use them.
*/
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
.macro pushq_cfi reg
pushq \reg
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro pushq_cfi_reg reg
pushq %\reg
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET \reg, 0
.endm
.macro popq_cfi reg
popq \reg
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro popq_cfi_reg reg
popq %\reg
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE \reg
.endm
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro popfq_cfi
popfq
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movq_cfi_restore offset reg
movq \offset(%rsp), %\reg
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
.macro pushl_cfi reg
pushl \reg
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro pushl_cfi_reg reg
pushl %\reg
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET \reg, 0
.endm
.macro popl_cfi reg
popl \reg
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro popl_cfi_reg reg
popl %\reg
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE \reg
.endm
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro popfl_cfi
popfl
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro movl_cfi reg offset=0
movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movl_cfi_restore offset reg
movl \offset(%esp), %\reg
CFI_RESTORE \reg
.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
#endif /* _ASM_X86_DWARF2_H */
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look /* The annotation hides the frame from the unwinder and makes it look
like a ordinary ebp save/restore. This avoids some special cases for like a ordinary ebp save/restore. This avoids some special cases for
frame pointer later */ frame pointer later */
#ifdef CONFIG_FRAME_POINTER #ifdef CONFIG_FRAME_POINTER
.macro FRAME .macro FRAME
__ASM_SIZE(push,_cfi) %__ASM_REG(bp) __ASM_SIZE(push,) %__ASM_REG(bp)
CFI_REL_OFFSET __ASM_REG(bp), 0
__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp) __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
.endm .endm
.macro ENDFRAME .macro ENDFRAME
__ASM_SIZE(pop,_cfi) %__ASM_REG(bp) __ASM_SIZE(pop,) %__ASM_REG(bp)
CFI_RESTORE __ASM_REG(bp)
.endm .endm
#else #else
.macro FRAME .macro FRAME
......
...@@ -11,26 +11,23 @@ ...@@ -11,26 +11,23 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
/* if you want SMP support, implement these with real spinlocks */ /* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg .macro LOCK reg
pushfl_cfi pushfl
cli cli
.endm .endm
.macro UNLOCK reg .macro UNLOCK reg
popfl_cfi popfl
.endm .endm
#define BEGIN(op) \ #define BEGIN(op) \
.macro endp; \ .macro endp; \
CFI_ENDPROC; \
ENDPROC(atomic64_##op##_386); \ ENDPROC(atomic64_##op##_386); \
.purgem endp; \ .purgem endp; \
.endm; \ .endm; \
ENTRY(atomic64_##op##_386); \ ENTRY(atomic64_##op##_386); \
CFI_STARTPROC; \
LOCK v; LOCK v;
#define ENDP endp #define ENDP endp
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
.macro read64 reg .macro read64 reg
movl %ebx, %eax movl %ebx, %eax
...@@ -22,16 +21,11 @@ ...@@ -22,16 +21,11 @@
.endm .endm
ENTRY(atomic64_read_cx8) ENTRY(atomic64_read_cx8)
CFI_STARTPROC
read64 %ecx read64 %ecx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_read_cx8) ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8) ENTRY(atomic64_set_cx8)
CFI_STARTPROC
1: 1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes /* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */ * are atomic on 586 and newer */
...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8) ...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_set_cx8) ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8) ENTRY(atomic64_xchg_cx8)
CFI_STARTPROC
1: 1:
LOCK_PREFIX LOCK_PREFIX
cmpxchg8b (%esi) cmpxchg8b (%esi)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8) ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc .macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx pushl %esi
pushl_cfi_reg esi pushl %edi
pushl_cfi_reg edi
movl %eax, %esi movl %eax, %esi
movl %edx, %edi movl %edx, %edi
...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg edi popl %edi
popl_cfi_reg esi popl %esi
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ebp popl %ebp
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -93,8 +81,7 @@ addsub_return sub sub sbb ...@@ -93,8 +81,7 @@ addsub_return sub sub sbb
.macro incdec_return func ins insc .macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -119,8 +105,7 @@ incdec_return inc add adc ...@@ -119,8 +105,7 @@ incdec_return inc add adc
incdec_return dec sub sbb incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8) ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8) ...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
2: 2:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8) ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8) ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx
/* these just push these two parameters on the stack */ /* these just push these two parameters on the stack */
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg ecx pushl %ecx
movl %eax, %ebp movl %eax, %ebp
movl %edx, %edi movl %edx, %edi
...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8) ...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
movl $1, %eax movl $1, %eax
3: 3:
addl $8, %esp addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8 popl %ebx
popl_cfi_reg ebx popl %ebp
popl_cfi_reg ebp
ret ret
4: 4:
cmpl %edx, 4(%esp) cmpl %edx, 4(%esp)
jne 2b jne 2b
xorl %eax, %eax xorl %eax, %eax
jmp 3b jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_add_unless_cx8) ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8) ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8) ...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax movl $1, %eax
3: 3:
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8) ENDPROC(atomic64_inc_not_zero_cx8)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) ...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* alignment for the unrolled loop. * alignment for the unrolled loop.
*/ */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff movl 12(%esp),%esi # Function arg: unsigned char *buff
...@@ -129,10 +127,9 @@ ENTRY(csum_partial) ...@@ -129,10 +127,9 @@ ENTRY(csum_partial)
jz 8f jz 8f
roll $8, %eax roll $8, %eax
8: 8:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#else #else
...@@ -140,9 +137,8 @@ ENDPROC(csum_partial) ...@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
/* Version for PentiumII/PPro */ /* Version for PentiumII/PPro */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf movl 12(%esp),%esi # Function arg: const unsigned char *buf
...@@ -249,10 +245,9 @@ ENTRY(csum_partial) ...@@ -249,10 +245,9 @@ ENTRY(csum_partial)
jz 90f jz 90f
roll $8, %eax roll $8, %eax
90: 90:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#endif #endif
...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, ...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define FP 12 #define FP 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
subl $4,%esp subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4 pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src movl ARGBASE+4(%esp),%esi # src
...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) ) ...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
.previous .previous
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi %ecx # equivalent to addl $4,%esp popl %ecx # equivalent to addl $4,%esp
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#else #else
...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic) ...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12 #define ARGBASE 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi
movl ARGBASE+4(%esp),%esi #src movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len movl ARGBASE+12(%esp),%ecx #len
...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) ) ...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
jmp 7b jmp 7b
.previous .previous
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#undef ROUND #undef ROUND
......
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -15,7 +14,6 @@ ...@@ -15,7 +14,6 @@
* %rdi - page * %rdi - page
*/ */
ENTRY(clear_page) ENTRY(clear_page)
CFI_STARTPROC
ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
"jmp clear_page_c_e", X86_FEATURE_ERMS "jmp clear_page_c_e", X86_FEATURE_ERMS
...@@ -24,11 +22,9 @@ ENTRY(clear_page) ...@@ -24,11 +22,9 @@ ENTRY(clear_page)
xorl %eax,%eax xorl %eax,%eax
rep stosq rep stosq
ret ret
CFI_ENDPROC
ENDPROC(clear_page) ENDPROC(clear_page)
ENTRY(clear_page_orig) ENTRY(clear_page_orig)
CFI_STARTPROC
xorl %eax,%eax xorl %eax,%eax
movl $4096/64,%ecx movl $4096/64,%ecx
...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig) ...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
jnz .Lloop jnz .Lloop
nop nop
ret ret
CFI_ENDPROC
ENDPROC(clear_page_orig) ENDPROC(clear_page_orig)
ENTRY(clear_page_c_e) ENTRY(clear_page_c_e)
CFI_STARTPROC
movl $4096,%ecx movl $4096,%ecx
xorl %eax,%eax xorl %eax,%eax
rep stosb rep stosb
ret ret
CFI_ENDPROC
ENDPROC(clear_page_c_e) ENDPROC(clear_page_c_e)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* *
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/percpu.h> #include <asm/percpu.h>
.text .text
...@@ -21,7 +20,6 @@ ...@@ -21,7 +20,6 @@
* %al : Operation successful * %al : Operation successful
*/ */
ENTRY(this_cpu_cmpxchg16b_emu) ENTRY(this_cpu_cmpxchg16b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
...@@ -32,7 +30,7 @@ CFI_STARTPROC ...@@ -32,7 +30,7 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of # *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros). # macros).
# #
pushfq_cfi pushfq
cli cli
cmpq PER_CPU_VAR((%rsi)), %rax cmpq PER_CPU_VAR((%rsi)), %rax
...@@ -43,17 +41,13 @@ CFI_STARTPROC ...@@ -43,17 +41,13 @@ CFI_STARTPROC
movq %rbx, PER_CPU_VAR((%rsi)) movq %rbx, PER_CPU_VAR((%rsi))
movq %rcx, PER_CPU_VAR(8(%rsi)) movq %rcx, PER_CPU_VAR(8(%rsi))
CFI_REMEMBER_STATE popfq
popfq_cfi
mov $1, %al mov $1, %al
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
popfq_cfi popfq
xor %al,%al xor %al,%al
ret ret
CFI_ENDPROC
ENDPROC(this_cpu_cmpxchg16b_emu) ENDPROC(this_cpu_cmpxchg16b_emu)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
.text .text
...@@ -20,14 +19,13 @@ ...@@ -20,14 +19,13 @@
* %ecx : high 32 bits of new value * %ecx : high 32 bits of new value
*/ */
ENTRY(cmpxchg8b_emu) ENTRY(cmpxchg8b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg8b (%esi)' on UP except we don't # Emulate 'cmpxchg8b (%esi)' on UP except we don't
# set the whole ZF thing (caller will just compare # set the whole ZF thing (caller will just compare
# eax:edx with the expected value) # eax:edx with the expected value)
# #
pushfl_cfi pushfl
cli cli
cmpl (%esi), %eax cmpl (%esi), %eax
...@@ -38,18 +36,15 @@ CFI_STARTPROC ...@@ -38,18 +36,15 @@ CFI_STARTPROC
movl %ebx, (%esi) movl %ebx, (%esi)
movl %ecx, 4(%esi) movl %ecx, 4(%esi)
CFI_REMEMBER_STATE popfl
popfl_cfi
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
movl (%esi), %eax movl (%esi), %eax
.Lhalf_same: .Lhalf_same:
movl 4(%esi), %edx movl 4(%esi), %edx
popfl_cfi popfl
ret ret
CFI_ENDPROC
ENDPROC(cmpxchg8b_emu) ENDPROC(cmpxchg8b_emu)
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -13,22 +12,16 @@ ...@@ -13,22 +12,16 @@
*/ */
ALIGN ALIGN
ENTRY(copy_page) ENTRY(copy_page)
CFI_STARTPROC
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx movl $4096/8, %ecx
rep movsq rep movsq
ret ret
CFI_ENDPROC
ENDPROC(copy_page) ENDPROC(copy_page)
ENTRY(copy_page_regs) ENTRY(copy_page_regs)
CFI_STARTPROC
subq $2*8, %rsp subq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET 2*8
movq %rbx, (%rsp) movq %rbx, (%rsp)
CFI_REL_OFFSET rbx, 0
movq %r12, 1*8(%rsp) movq %r12, 1*8(%rsp)
CFI_REL_OFFSET r12, 1*8
movl $(4096/64)-5, %ecx movl $(4096/64)-5, %ecx
.p2align 4 .p2align 4
...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs) ...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
jnz .Loop2 jnz .Loop2
movq (%rsp), %rbx movq (%rsp), %rbx
CFI_RESTORE rbx
movq 1*8(%rsp), %r12 movq 1*8(%rsp), %r12
CFI_RESTORE r12
addq $2*8, %rsp addq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET -2*8
ret ret
CFI_ENDPROC
ENDPROC(copy_page_regs) ENDPROC(copy_page_regs)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -18,7 +17,6 @@ ...@@ -18,7 +17,6 @@
/* Standard copy_to_user with segment limit checking */ /* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user) ENTRY(_copy_to_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rdi,%rcx movq %rdi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user) ...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_to_user) ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */ /* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user) ENTRY(_copy_from_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rsi,%rcx movq %rsi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user) ...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_from_user) ENDPROC(_copy_from_user)
.section .fixup,"ax" .section .fixup,"ax"
/* must zero dest */ /* must zero dest */
ENTRY(bad_from_user) ENTRY(bad_from_user)
bad_from_user: bad_from_user:
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
xorl %eax,%eax xorl %eax,%eax
rep rep
...@@ -62,7 +56,6 @@ bad_from_user: ...@@ -62,7 +56,6 @@ bad_from_user:
bad_to_user: bad_to_user:
movl %edx,%eax movl %edx,%eax
ret ret
CFI_ENDPROC
ENDPROC(bad_from_user) ENDPROC(bad_from_user)
.previous .previous
...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user) ...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_unrolled) ENTRY(copy_user_generic_unrolled)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */ jb 20f /* less then 8 bytes, go to byte copy loop */
...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled) ...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled) ENDPROC(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions. /* Some CPUs run faster using the string copy instructions.
...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled) ...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_string) ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */ jb 2f /* less than 8 bytes, go to byte copy loop */
...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string) ...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b) _ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b) _ASM_EXTABLE(3b,12b)
CFI_ENDPROC
ENDPROC(copy_user_generic_string) ENDPROC(copy_user_generic_string)
/* /*
...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string) ...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_enhanced_fast_string) ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
movl %edx,%ecx movl %edx,%ecx
1: rep 1: rep
...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string) ...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
.previous .previous
_ASM_EXTABLE(1b,12b) _ASM_EXTABLE(1b,12b)
CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string) ENDPROC(copy_user_enhanced_fast_string)
/* /*
...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string) ...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
* This will force destination/source out of cache for more performance. * This will force destination/source out of cache for more performance.
*/ */
ENTRY(__copy_user_nocache) ENTRY(__copy_user_nocache)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */ jb 20f /* less then 8 bytes, go to byte copy loop */
...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache) ...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(__copy_user_nocache) ENDPROC(__copy_user_nocache)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* for more details. No warranty for anything given at all. * for more details. No warranty for anything given at all.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -47,23 +46,16 @@ ...@@ -47,23 +46,16 @@
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
cmpl $3*64, %edx cmpl $3*64, %edx
jle .Lignore jle .Lignore
.Lignore: .Lignore:
subq $7*8, %rsp subq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET 7*8
movq %rbx, 2*8(%rsp) movq %rbx, 2*8(%rsp)
CFI_REL_OFFSET rbx, 2*8
movq %r12, 3*8(%rsp) movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
movq %r14, 4*8(%rsp) movq %r14, 4*8(%rsp)
CFI_REL_OFFSET r14, 4*8
movq %r13, 5*8(%rsp) movq %r13, 5*8(%rsp)
CFI_REL_OFFSET r13, 5*8
movq %rbp, 6*8(%rsp) movq %rbp, 6*8(%rsp)
CFI_REL_OFFSET rbp, 6*8
movq %r8, (%rsp) movq %r8, (%rsp)
movq %r9, 1*8(%rsp) movq %r9, 1*8(%rsp)
...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic) ...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
addl %ebx, %eax addl %ebx, %eax
adcl %r9d, %eax /* carry */ adcl %r9d, %eax /* carry */
CFI_REMEMBER_STATE
.Lende: .Lende:
movq 2*8(%rsp), %rbx movq 2*8(%rsp), %rbx
CFI_RESTORE rbx
movq 3*8(%rsp), %r12 movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %r14 movq 4*8(%rsp), %r14
CFI_RESTORE r14
movq 5*8(%rsp), %r13 movq 5*8(%rsp), %r13
CFI_RESTORE r13
movq 6*8(%rsp), %rbp movq 6*8(%rsp), %rbp
CFI_RESTORE rbp
addq $7*8, %rsp addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
ret ret
CFI_RESTORE_STATE
/* Exception handlers. Very simple, zeroing is done in the wrappers */ /* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source: .Lbad_source:
...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic) ...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende jz .Lende
movl $-EFAULT, (%rax) movl $-EFAULT, (%rax)
jmp .Lende jmp .Lende
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/page_types.h> #include <asm/page_types.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -36,7 +35,6 @@ ...@@ -36,7 +35,6 @@
.text .text
ENTRY(__get_user_1) ENTRY(__get_user_1)
CFI_STARTPROC
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
...@@ -45,11 +43,9 @@ ENTRY(__get_user_1) ...@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_1) ENDPROC(__get_user_1)
ENTRY(__get_user_2) ENTRY(__get_user_2)
CFI_STARTPROC
add $1,%_ASM_AX add $1,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -60,11 +56,9 @@ ENTRY(__get_user_2) ...@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_2) ENDPROC(__get_user_2)
ENTRY(__get_user_4) ENTRY(__get_user_4)
CFI_STARTPROC
add $3,%_ASM_AX add $3,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -75,11 +69,9 @@ ENTRY(__get_user_4) ...@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_4) ENDPROC(__get_user_4)
ENTRY(__get_user_8) ENTRY(__get_user_8)
CFI_STARTPROC
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
add $7,%_ASM_AX add $7,%_ASM_AX
jc bad_get_user jc bad_get_user
...@@ -104,28 +96,23 @@ ENTRY(__get_user_8) ...@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
ASM_CLAC ASM_CLAC
ret ret
#endif #endif
CFI_ENDPROC
ENDPROC(__get_user_8) ENDPROC(__get_user_8)
bad_get_user: bad_get_user:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user) END(bad_get_user)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
bad_get_user_8: bad_get_user_8:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
xor %ecx,%ecx xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user_8) END(bad_get_user_8)
#endif #endif
......
...@@ -16,15 +16,12 @@ ...@@ -16,15 +16,12 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* override generic version in lib/iomap_copy.c * override generic version in lib/iomap_copy.c
*/ */
ENTRY(__iowrite32_copy) ENTRY(__iowrite32_copy)
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
rep movsd rep movsd
ret ret
CFI_ENDPROC
ENDPROC(__iowrite32_copy) ENDPROC(__iowrite32_copy)
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms) ...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
ENDPROC(memcpy_erms) ENDPROC(memcpy_erms)
ENTRY(memcpy_orig) ENTRY(memcpy_orig)
CFI_STARTPROC
movq %rdi, %rax movq %rdi, %rax
cmpq $0x20, %rdx cmpq $0x20, %rdx
...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig) ...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
.Lend: .Lend:
retq retq
CFI_ENDPROC
ENDPROC(memcpy_orig) ENDPROC(memcpy_orig)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -27,7 +26,6 @@ ...@@ -27,7 +26,6 @@
ENTRY(memmove) ENTRY(memmove)
ENTRY(__memmove) ENTRY(__memmove)
CFI_STARTPROC
/* Handle more 32 bytes in loop */ /* Handle more 32 bytes in loop */
mov %rdi, %rax mov %rdi, %rax
...@@ -207,6 +205,5 @@ ENTRY(__memmove) ...@@ -207,6 +205,5 @@ ENTRY(__memmove)
movb %r11b, (%rdi) movb %r11b, (%rdi)
13: 13:
retq retq
CFI_ENDPROC
ENDPROC(__memmove) ENDPROC(__memmove)
ENDPROC(memmove) ENDPROC(memmove)
/* Copyright 2002 Andi Kleen, SuSE Labs */ /* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -66,7 +65,6 @@ ENTRY(memset_erms) ...@@ -66,7 +65,6 @@ ENTRY(memset_erms)
ENDPROC(memset_erms) ENDPROC(memset_erms)
ENTRY(memset_orig) ENTRY(memset_orig)
CFI_STARTPROC
movq %rdi,%r10 movq %rdi,%r10
/* expand byte value */ /* expand byte value */
...@@ -78,7 +76,6 @@ ENTRY(memset_orig) ...@@ -78,7 +76,6 @@ ENTRY(memset_orig)
movl %edi,%r9d movl %edi,%r9d
andl $7,%r9d andl $7,%r9d
jnz .Lbad_alignment jnz .Lbad_alignment
CFI_REMEMBER_STATE
.Lafter_bad_alignment: .Lafter_bad_alignment:
movq %rdx,%rcx movq %rdx,%rcx
...@@ -128,7 +125,6 @@ ENTRY(memset_orig) ...@@ -128,7 +125,6 @@ ENTRY(memset_orig)
movq %r10,%rax movq %r10,%rax
ret ret
CFI_RESTORE_STATE
.Lbad_alignment: .Lbad_alignment:
cmpq $7,%rdx cmpq $7,%rdx
jbe .Lhandle_7 jbe .Lhandle_7
...@@ -139,5 +135,4 @@ ENTRY(memset_orig) ...@@ -139,5 +135,4 @@ ENTRY(memset_orig)
subq %r8,%rdx subq %r8,%rdx
jmp .Lafter_bad_alignment jmp .Lafter_bad_alignment
.Lfinal: .Lfinal:
CFI_ENDPROC
ENDPROC(memset_orig) ENDPROC(memset_orig)
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <asm/dwarf2.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/msr.h> #include <asm/msr.h>
...@@ -13,9 +12,8 @@ ...@@ -13,9 +12,8 @@
*/ */
.macro op_safe_regs op .macro op_safe_regs op
ENTRY(\op\()_safe_regs) ENTRY(\op\()_safe_regs)
CFI_STARTPROC pushq %rbx
pushq_cfi_reg rbx pushq %rbp
pushq_cfi_reg rbp
movq %rdi, %r10 /* Save pointer */ movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */ xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax movl (%rdi), %eax
...@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs) ...@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
movl 20(%rdi), %ebp movl 20(%rdi), %ebp
movl 24(%rdi), %esi movl 24(%rdi), %esi
movl 28(%rdi), %edi movl 28(%rdi), %edi
CFI_REMEMBER_STATE
1: \op 1: \op
2: movl %eax, (%r10) 2: movl %eax, (%r10)
movl %r11d, %eax /* Return value */ movl %r11d, %eax /* Return value */
...@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs) ...@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
movl %ebp, 20(%r10) movl %ebp, 20(%r10)
movl %esi, 24(%r10) movl %esi, 24(%r10)
movl %edi, 28(%r10) movl %edi, 28(%r10)
popq_cfi_reg rbp popq %rbp
popq_cfi_reg rbx popq %rbx
ret ret
3: 3:
CFI_RESTORE_STATE
movl $-EIO, %r11d movl $-EIO, %r11d
jmp 2b jmp 2b
_ASM_EXTABLE(1b, 3b) _ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(\op\()_safe_regs) ENDPROC(\op\()_safe_regs)
.endm .endm
...@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs) ...@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
.macro op_safe_regs op .macro op_safe_regs op
ENTRY(\op\()_safe_regs) ENTRY(\op\()_safe_regs)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx pushl %ebp
pushl_cfi_reg ebp pushl %esi
pushl_cfi_reg esi pushl %edi
pushl_cfi_reg edi pushl $0 /* Return value */
pushl_cfi $0 /* Return value */ pushl %eax
pushl_cfi %eax
movl 4(%eax), %ecx movl 4(%eax), %ecx
movl 8(%eax), %edx movl 8(%eax), %edx
movl 12(%eax), %ebx movl 12(%eax), %ebx
...@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs) ...@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
movl 24(%eax), %esi movl 24(%eax), %esi
movl 28(%eax), %edi movl 28(%eax), %edi
movl (%eax), %eax movl (%eax), %eax
CFI_REMEMBER_STATE
1: \op 1: \op
2: pushl_cfi %eax 2: pushl %eax
movl 4(%esp), %eax movl 4(%esp), %eax
popl_cfi (%eax) popl (%eax)
addl $4, %esp addl $4, %esp
CFI_ADJUST_CFA_OFFSET -4
movl %ecx, 4(%eax) movl %ecx, 4(%eax)
movl %edx, 8(%eax) movl %edx, 8(%eax)
movl %ebx, 12(%eax) movl %ebx, 12(%eax)
movl %ebp, 20(%eax) movl %ebp, 20(%eax)
movl %esi, 24(%eax) movl %esi, 24(%eax)
movl %edi, 28(%eax) movl %edi, 28(%eax)
popl_cfi %eax popl %eax
popl_cfi_reg edi popl %edi
popl_cfi_reg esi popl %esi
popl_cfi_reg ebp popl %ebp
popl_cfi_reg ebx popl %ebx
ret ret
3: 3:
CFI_RESTORE_STATE
movl $-EIO, 4(%esp) movl $-EIO, 4(%esp)
jmp 2b jmp 2b
_ASM_EXTABLE(1b, 3b) _ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(\op\()_safe_regs) ENDPROC(\op\()_safe_regs)
.endm .endm
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
* return value. * return value.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -30,11 +29,9 @@ ...@@ -30,11 +29,9 @@
* as they get called from within inline assembly. * as they get called from within inline assembly.
*/ */
#define ENTER CFI_STARTPROC ; \ #define ENTER GET_THREAD_INFO(%_ASM_BX)
GET_THREAD_INFO(%_ASM_BX)
#define EXIT ASM_CLAC ; \ #define EXIT ASM_CLAC ; \
ret ; \ ret
CFI_ENDPROC
.text .text
ENTRY(__put_user_1) ENTRY(__put_user_1)
...@@ -87,7 +84,6 @@ ENTRY(__put_user_8) ...@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
ENDPROC(__put_user_8) ENDPROC(__put_user_8)
bad_put_user: bad_put_user:
CFI_STARTPROC
movl $-EFAULT,%eax movl $-EFAULT,%eax
EXIT EXIT
END(bad_put_user) END(bad_put_user)
......
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) #define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) #define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
...@@ -34,10 +33,10 @@ ...@@ -34,10 +33,10 @@
*/ */
#define save_common_regs \ #define save_common_regs \
pushl_cfi_reg ecx pushl %ecx
#define restore_common_regs \ #define restore_common_regs \
popl_cfi_reg ecx popl %ecx
/* Avoid uglifying the argument copying x86-64 needs to do. */ /* Avoid uglifying the argument copying x86-64 needs to do. */
.macro movq src, dst .macro movq src, dst
...@@ -64,50 +63,45 @@ ...@@ -64,50 +63,45 @@
*/ */
#define save_common_regs \ #define save_common_regs \
pushq_cfi_reg rdi; \ pushq %rdi; \
pushq_cfi_reg rsi; \ pushq %rsi; \
pushq_cfi_reg rcx; \ pushq %rcx; \
pushq_cfi_reg r8; \ pushq %r8; \
pushq_cfi_reg r9; \ pushq %r9; \
pushq_cfi_reg r10; \ pushq %r10; \
pushq_cfi_reg r11 pushq %r11
#define restore_common_regs \ #define restore_common_regs \
popq_cfi_reg r11; \ popq %r11; \
popq_cfi_reg r10; \ popq %r10; \
popq_cfi_reg r9; \ popq %r9; \
popq_cfi_reg r8; \ popq %r8; \
popq_cfi_reg rcx; \ popq %rcx; \
popq_cfi_reg rsi; \ popq %rsi; \
popq_cfi_reg rdi popq %rdi
#endif #endif
/* Fix up special calling conventions */ /* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed) ENTRY(call_rwsem_down_read_failed)
CFI_STARTPROC
save_common_regs save_common_regs
__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi movq %rax,%rdi
call rwsem_down_read_failed call rwsem_down_read_failed
__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs restore_common_regs
ret ret
CFI_ENDPROC
ENDPROC(call_rwsem_down_read_failed) ENDPROC(call_rwsem_down_read_failed)
ENTRY(call_rwsem_down_write_failed) ENTRY(call_rwsem_down_write_failed)
CFI_STARTPROC
save_common_regs save_common_regs
movq %rax,%rdi movq %rax,%rdi
call rwsem_down_write_failed call rwsem_down_write_failed
restore_common_regs restore_common_regs
ret ret
CFI_ENDPROC
ENDPROC(call_rwsem_down_write_failed) ENDPROC(call_rwsem_down_write_failed)
ENTRY(call_rwsem_wake) ENTRY(call_rwsem_wake)
CFI_STARTPROC
/* do nothing if still outstanding active readers */ /* do nothing if still outstanding active readers */
__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
jnz 1f jnz 1f
...@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake) ...@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
call rwsem_wake call rwsem_wake
restore_common_regs restore_common_regs
1: ret 1: ret
CFI_ENDPROC
ENDPROC(call_rwsem_wake) ENDPROC(call_rwsem_wake)
ENTRY(call_rwsem_downgrade_wake) ENTRY(call_rwsem_downgrade_wake)
CFI_STARTPROC
save_common_regs save_common_regs
__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi movq %rax,%rdi
call rwsem_downgrade_wake call rwsem_downgrade_wake
__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs restore_common_regs
ret ret
CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake) ENDPROC(call_rwsem_downgrade_wake)
...@@ -6,16 +6,14 @@ ...@@ -6,16 +6,14 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/dwarf2.h>
/* put return address in eax (arg1) */ /* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0 .macro THUNK name, func, put_ret_addr_in_eax=0
.globl \name .globl \name
\name: \name:
CFI_STARTPROC pushl %eax
pushl_cfi_reg eax pushl %ecx
pushl_cfi_reg ecx pushl %edx
pushl_cfi_reg edx
.if \put_ret_addr_in_eax .if \put_ret_addr_in_eax
/* Place EIP in the arg1 */ /* Place EIP in the arg1 */
...@@ -23,11 +21,10 @@ ...@@ -23,11 +21,10 @@
.endif .endif
call \func call \func
popl_cfi_reg edx popl %edx
popl_cfi_reg ecx popl %ecx
popl_cfi_reg eax popl %eax
ret ret
CFI_ENDPROC
_ASM_NOKPROBE(\name) _ASM_NOKPROBE(\name)
.endm .endm
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* Subject to the GNU public license, v.2. No warranty of any kind. * Subject to the GNU public license, v.2. No warranty of any kind.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/calling.h> #include <asm/calling.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -14,27 +13,25 @@ ...@@ -14,27 +13,25 @@
.macro THUNK name, func, put_ret_addr_in_rdi=0 .macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name .globl \name
\name: \name:
CFI_STARTPROC
/* this one pushes 9 elems, the next one would be %rIP */ /* this one pushes 9 elems, the next one would be %rIP */
pushq_cfi_reg rdi pushq %rdi
pushq_cfi_reg rsi pushq %rsi
pushq_cfi_reg rdx pushq %rdx
pushq_cfi_reg rcx pushq %rcx
pushq_cfi_reg rax pushq %rax
pushq_cfi_reg r8 pushq %r8
pushq_cfi_reg r9 pushq %r9
pushq_cfi_reg r10 pushq %r10
pushq_cfi_reg r11 pushq %r11
.if \put_ret_addr_in_rdi .if \put_ret_addr_in_rdi
/* 9*8(%rsp) is return addr on stack */ /* 9*8(%rsp) is return addr on stack */
movq_cfi_restore 9*8, rdi movq 9*8(%rsp), %rdi
.endif .endif
call \func call \func
jmp restore jmp restore
CFI_ENDPROC
_ASM_NOKPROBE(\name) _ASM_NOKPROBE(\name)
.endm .endm
...@@ -57,19 +54,16 @@ ...@@ -57,19 +54,16 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \ #if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \ || defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT)
CFI_STARTPROC
CFI_ADJUST_CFA_OFFSET 9*8
restore: restore:
popq_cfi_reg r11 popq %r11
popq_cfi_reg r10 popq %r10
popq_cfi_reg r9 popq %r9
popq_cfi_reg r8 popq %r8
popq_cfi_reg rax popq %rax
popq_cfi_reg rcx popq %rcx
popq_cfi_reg rdx popq %rdx
popq_cfi_reg rsi popq %rsi
popq_cfi_reg rdi popq %rdi
ret ret
CFI_ENDPROC
_ASM_NOKPROBE(restore) _ASM_NOKPROBE(restore)
#endif #endif
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
* of the License. * of the License.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* Calling convention : * Calling convention :
......