Commit 131484c8 authored by Ingo Molnar

x86/debug: Remove perpetually broken, unmaintainable dwarf annotations

So the dwarf2 annotations in low-level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security-sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf-driven stack unwinding has caused
problems in the past, so it's out of tree, and the upstream
kernel only uses the much more robust frame-pointer-based
stack unwinding method.

In addition to that there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps them correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
with the following conditions:

 - it should be maximally readable, and maximally low-key to
   'ordinary' code reading and maintenance.

 - find a build-time method to insert dwarf annotations
   automatically in the most common cases, for push/pop
   instructions that manipulate the stack pointer. This could
   be done for example via a preprocessing step that just
   looks for common patterns - plus special annotations for
   the few cases where we want to depart from the default.
   We have hundreds of CFI annotations, so automating most of
   that makes sense. (A toy sketch of such a pass follows this
   list.)

 - it should come with build tooling checks that ensure that
   CFI annotations are sensible. We've seen such efforts from
   the frame-pointer side, and there's no reason it couldn't be
   done on the dwarf side. (A minimal example of such a check
   also follows.)
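
For illustration only, here is a toy sketch of what such a
preprocessing pass could look like. It is not part of this commit and
is not wired into kbuild; the script name and the CFI_SKIP escape
hatch are made up for the sketch. It emits the same
.cfi_adjust_cfa_offset/.cfi_rel_offset sequences that the pushq_cfi /
pushq_cfi_reg macros removed below used to open-code:

   #!/usr/bin/env python3
   # annotate-cfi.py (hypothetical): read x86-64 assembly on stdin,
   # emit it on stdout with dwarf CFI added after common push/pop
   # patterns. Lines marked CFI_SKIP are left alone.
   import re
   import sys

   PUSH_REG = re.compile(r'^\s*pushq\s+%(\w+)\s*(?:[#/].*)?$')  # pushq %rbp
   POP_REG  = re.compile(r'^\s*popq\s+%(\w+)\s*(?:[#/].*)?$')   # popq %rbp
   PUSH_ANY = re.compile(r'^\s*push(?:q|fq)\b')                 # pushq $imm, pushfq
   POP_ANY  = re.compile(r'^\s*pop(?:q|fq)\b')

   def annotate(lines):
       out = []
       for line in lines:
           out.append(line)
           if 'CFI_SKIP' in line:        # explicit opt-out for special cases
               continue
           m = PUSH_REG.match(line)
           if m:                         # register push: CFA moves by 8 and
                                         # the register is saved at offset 0
               out.append('\t.cfi_adjust_cfa_offset 8\n')
               out.append('\t.cfi_rel_offset %s, 0\n' % m.group(1))
               continue
           m = POP_REG.match(line)
           if m:                         # register pop: CFA moves back and
                                         # the register value is restored
               out.append('\t.cfi_adjust_cfa_offset -8\n')
               out.append('\t.cfi_restore %s\n' % m.group(1))
               continue
           if PUSH_ANY.match(line):      # push of $imm/flags/memory operand
               out.append('\t.cfi_adjust_cfa_offset 8\n')
           elif POP_ANY.match(line):
               out.append('\t.cfi_adjust_cfa_offset -8\n')
       return out

   if __name__ == '__main__':
       sys.stdout.writelines(annotate(sys.stdin.readlines()))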
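
In the same spirit, a minimal example of the kind of build check meant
above - again purely hypothetical and deliberately simplistic: it
ignores control flow, remember/restore state and explicit %rsp
arithmetic, and only demonstrates the principle of comparing declared
CFA adjustments against the pushes and pops that actually happened:

   #!/usr/bin/env python3
   # check-cfi.py (hypothetical): flag functions whose declared CFA
   # adjustments disagree with their actual push/pop stack usage.
   import re
   import sys

   ADJUST = re.compile(r'\.cfi_adjust_cfa_offset\s+(-?\d+)')

   def check(path):
       real = declared = 0          # bytes pushed: actual vs. annotated
       ok = True
       for n, line in enumerate(open(path), 1):
           s = line.split('#')[0].strip()   # drop trailing # comments
           if s.startswith('.cfi_startproc'):
               real = declared = 0          # new function, reset both
           elif s.startswith(('pushq', 'pushfq')):
               real += 8
           elif s.startswith(('popq', 'popfq')):
               real -= 8
           m = ADJUST.search(s)
           if m:
               declared += int(m.group(1))
           if s.startswith('.cfi_endproc') and real != declared:
               print('%s:%d: CFA off by %d bytes' % (path, n, real - declared))
               ok = False
       return ok

   if __name__ == '__main__':
       sys.exit(0 if all([check(f) for f in sys.argv[1:]]) else 1)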

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cdeb6048
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
 LDFLAGS := -m elf_$(UTS_MACHINE)
...
@@ -4,7 +4,6 @@
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
@@ -60,17 +59,6 @@
 	movl %eax,%eax /* zero extension */
 .endm
-.macro CFI_STARTPROC32 simple
-	CFI_STARTPROC \simple
-	CFI_UNDEFINED r8
-	CFI_UNDEFINED r9
-	CFI_UNDEFINED r10
-	CFI_UNDEFINED r11
-	CFI_UNDEFINED r12
-	CFI_UNDEFINED r13
-	CFI_UNDEFINED r14
-	CFI_UNDEFINED r15
-.endm
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret32)
@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32)
  * with the int 0x80 path.
  */
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,0
-	CFI_REGISTER rsp,rbp
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target)
 	movl %eax, %eax
 	movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-	CFI_REGISTER rip,r10
 	/* Construct struct pt_regs on stack */
-	pushq_cfi $__USER32_DS /* pt_regs->ss */
-	pushq_cfi %rbp /* pt_regs->sp */
-	CFI_REL_OFFSET rsp,0
-	pushfq_cfi /* pt_regs->flags */
-	pushq_cfi $__USER32_CS /* pt_regs->cs */
-	pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg rax /* pt_regs->orig_ax */
-	pushq_cfi_reg rdi /* pt_regs->di */
-	pushq_cfi_reg rsi /* pt_regs->si */
-	pushq_cfi_reg rdx /* pt_regs->dx */
-	pushq_cfi_reg rcx /* pt_regs->cx */
-	pushq_cfi $-ENOSYS /* pt_regs->ax */
+	pushq $__USER32_DS /* pt_regs->ss */
+	pushq %rbp /* pt_regs->sp */
+	pushfq /* pt_regs->flags */
+	pushq $__USER32_CS /* pt_regs->cs */
+	pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
+	pushq %rax /* pt_regs->orig_ax */
+	pushq %rdi /* pt_regs->di */
+	pushq %rsi /* pt_regs->si */
+	pushq %rdx /* pt_regs->dx */
+	pushq %rcx /* pt_regs->cx */
+	pushq $-ENOSYS /* pt_regs->ax */
 	cld
 	sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 	/*
 	 * no need to do an access_ok check here because rbp has been
@@ -161,8 +140,8 @@ sysenter_flags_fixed:
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz sysenter_tracesys
 sysenter_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl %edi,%r8d /* arg5 */
@@ -193,14 +172,12 @@ sysexit_from_sys_call:
 	 */
 	andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl RIP(%rsp),%ecx /* User %eip */
-	CFI_REGISTER rip,rcx
 	RESTORE_RSI_RDI
 	xorl %edx,%edx /* avoid info leaks */
 	xorq %r8,%r8
 	xorq %r9,%r9
 	xorq %r10,%r10
 	movl EFLAGS(%rsp),%r11d /* User eflags */
-	/*CFI_RESTORE rflags*/
 	TRACE_IRQS_ON
 	/*
@@ -231,8 +208,6 @@ sysexit_from_sys_call:
 	 */
 	USERGS_SYSRET32
-	CFI_RESTORE_STATE
 #ifdef CONFIG_AUDITSYSCALL
 .macro auditsys_entry_common
 	movl %esi,%r8d /* 5th arg: 4th syscall arg */
@@ -282,8 +257,8 @@ sysexit_audit:
 #endif
 sysenter_fix_flags:
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-	popfq_cfi
+	pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+	popfq
 	jmp sysenter_flags_fixed
 sysenter_tracesys:
@@ -298,7 +273,6 @@ sysenter_tracesys:
 	LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp sysenter_do_call
-	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
 /*
@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target)
  * with the int 0x80 path.
  */
 ENTRY(ia32_cstar_target)
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,0
-	CFI_REGISTER rip,rcx
-	/*CFI_REGISTER rflags,r11*/
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target)
 	 */
 	SWAPGS_UNSAFE_STACK
 	movl %esp,%r8d
-	CFI_REGISTER rsp,r8
 	movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target)
 	movl %eax,%eax
 	/* Construct struct pt_regs on stack */
-	pushq_cfi $__USER32_DS /* pt_regs->ss */
-	pushq_cfi %r8 /* pt_regs->sp */
-	CFI_REL_OFFSET rsp,0
-	pushq_cfi %r11 /* pt_regs->flags */
-	pushq_cfi $__USER32_CS /* pt_regs->cs */
-	pushq_cfi %rcx /* pt_regs->ip */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg rax /* pt_regs->orig_ax */
-	pushq_cfi_reg rdi /* pt_regs->di */
-	pushq_cfi_reg rsi /* pt_regs->si */
-	pushq_cfi_reg rdx /* pt_regs->dx */
-	pushq_cfi_reg rbp /* pt_regs->cx */
+	pushq $__USER32_DS /* pt_regs->ss */
+	pushq %r8 /* pt_regs->sp */
+	pushq %r11 /* pt_regs->flags */
+	pushq $__USER32_CS /* pt_regs->cs */
+	pushq %rcx /* pt_regs->ip */
+	pushq %rax /* pt_regs->orig_ax */
+	pushq %rdi /* pt_regs->di */
+	pushq %rsi /* pt_regs->si */
+	pushq %rdx /* pt_regs->dx */
+	pushq %rbp /* pt_regs->cx */
 	movl %ebp,%ecx
-	pushq_cfi $-ENOSYS /* pt_regs->ax */
+	pushq $-ENOSYS /* pt_regs->ax */
 	sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 	/*
 	 * no need to do an access_ok check here because r8 has been
@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target)
 	ASM_CLAC
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz cstar_tracesys
 cstar_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl %edi,%r8d /* arg5 */
@@ -403,15 +367,12 @@ sysretl_from_sys_call:
 	andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	RESTORE_RSI_RDI_RDX
 	movl RIP(%rsp),%ecx
-	CFI_REGISTER rip,rcx
 	movl EFLAGS(%rsp),%r11d
-	/*CFI_REGISTER rflags,r11*/
 	xorq %r10,%r10
 	xorq %r9,%r9
 	xorq %r8,%r8
 	TRACE_IRQS_ON
 	movl RSP(%rsp),%esp
-	CFI_RESTORE rsp
 	/*
 	 * 64bit->32bit SYSRET restores eip from ecx,
 	 * eflags from r11 (but RF and VM bits are forced to 0),
@@ -430,7 +391,6 @@ sysretl_from_sys_call:
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
-	CFI_RESTORE_STATE
 	movl %r9d,R9(%rsp) /* register to be clobbered by call */
 	auditsys_entry_common
 	movl R9(%rsp),%r9d /* reload 6th syscall arg */
@@ -460,7 +420,6 @@ ia32_badarg:
 	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
-	CFI_ENDPROC
 /*
  * Emulated IA32 system calls via int 0x80.
@@ -484,15 +443,6 @@ ia32_badarg:
  */
 ENTRY(ia32_syscall)
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,5*8
-	/*CFI_REL_OFFSET ss,4*8 */
-	CFI_REL_OFFSET rsp,3*8
-	/*CFI_REL_OFFSET rflags,2*8 */
-	/*CFI_REL_OFFSET cs,1*8 */
-	CFI_REL_OFFSET rip,0*8
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -506,15 +456,14 @@ ENTRY(ia32_syscall)
 	movl %eax,%eax
 	/* Construct struct pt_regs on stack (iret frame is already on stack) */
-	pushq_cfi_reg rax /* pt_regs->orig_ax */
-	pushq_cfi_reg rdi /* pt_regs->di */
-	pushq_cfi_reg rsi /* pt_regs->si */
-	pushq_cfi_reg rdx /* pt_regs->dx */
-	pushq_cfi_reg rcx /* pt_regs->cx */
-	pushq_cfi $-ENOSYS /* pt_regs->ax */
+	pushq %rax /* pt_regs->orig_ax */
+	pushq %rdi /* pt_regs->di */
+	pushq %rsi /* pt_regs->si */
+	pushq %rdx /* pt_regs->dx */
+	pushq %rcx /* pt_regs->cx */
+	pushq $-ENOSYS /* pt_regs->ax */
 	cld
 	sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -544,7 +493,6 @@ ia32_tracesys:
 	LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp ia32_do_call
-	CFI_ENDPROC
 END(ia32_syscall)
 .macro PTREGSCALL label, func
@@ -554,8 +502,6 @@ GLOBAL(\label)
 	jmp ia32_ptregs_common
 .endm
-	CFI_STARTPROC32
 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
 PTREGSCALL stub32_sigreturn, sys32_sigreturn
 PTREGSCALL stub32_fork, sys_fork
@@ -569,23 +515,8 @@ GLOBAL(stub32_clone)
 	ALIGN
 ia32_ptregs_common:
-	CFI_ENDPROC
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,SIZEOF_PTREGS
-	CFI_REL_OFFSET rax,RAX
-	CFI_REL_OFFSET rcx,RCX
-	CFI_REL_OFFSET rdx,RDX
-	CFI_REL_OFFSET rsi,RSI
-	CFI_REL_OFFSET rdi,RDI
-	CFI_REL_OFFSET rip,RIP
-	/* CFI_REL_OFFSET cs,CS*/
-	/* CFI_REL_OFFSET rflags,EFLAGS*/
-	CFI_REL_OFFSET rsp,RSP
-	/* CFI_REL_OFFSET ss,SS*/
 	SAVE_EXTRA_REGS 8
 	call *%rax
 	RESTORE_EXTRA_REGS 8
 	ret
-	CFI_ENDPROC
 END(ia32_ptregs_common)
...
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
  */
-#include <asm/dwarf2.h>
 #ifdef CONFIG_X86_64
 /*
@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with
 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
 	subq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET 15*8+\addskip
 .endm
 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
 	.if \r11
-	movq_cfi r11, 6*8+\offset
+	movq %r11, 6*8+\offset(%rsp)
 	.endif
 	.if \r8910
-	movq_cfi r10, 7*8+\offset
-	movq_cfi r9, 8*8+\offset
-	movq_cfi r8, 9*8+\offset
+	movq %r10, 7*8+\offset(%rsp)
+	movq %r9, 8*8+\offset(%rsp)
+	movq %r8, 9*8+\offset(%rsp)
 	.endif
 	.if \rax
-	movq_cfi rax, 10*8+\offset
+	movq %rax, 10*8+\offset(%rsp)
 	.endif
 	.if \rcx
-	movq_cfi rcx, 11*8+\offset
+	movq %rcx, 11*8+\offset(%rsp)
 	.endif
-	movq_cfi rdx, 12*8+\offset
-	movq_cfi rsi, 13*8+\offset
-	movq_cfi rdi, 14*8+\offset
+	movq %rdx, 12*8+\offset(%rsp)
+	movq %rsi, 13*8+\offset(%rsp)
+	movq %rdi, 14*8+\offset(%rsp)
 .endm
 .macro SAVE_C_REGS offset=0
 	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 .macro SAVE_EXTRA_REGS offset=0
-	movq_cfi r15, 0*8+\offset
-	movq_cfi r14, 1*8+\offset
-	movq_cfi r13, 2*8+\offset
-	movq_cfi r12, 3*8+\offset
-	movq_cfi rbp, 4*8+\offset
-	movq_cfi rbx, 5*8+\offset
+	movq %r15, 0*8+\offset(%rsp)
+	movq %r14, 1*8+\offset(%rsp)
+	movq %r13, 2*8+\offset(%rsp)
+	movq %r12, 3*8+\offset(%rsp)
+	movq %rbp, 4*8+\offset(%rsp)
+	movq %rbx, 5*8+\offset(%rsp)
 .endm
 .macro SAVE_EXTRA_REGS_RBP offset=0
-	movq_cfi rbp, 4*8+\offset
+	movq %rbp, 4*8+\offset(%rsp)
 .endm
 .macro RESTORE_EXTRA_REGS offset=0
-	movq_cfi_restore 0*8+\offset, r15
-	movq_cfi_restore 1*8+\offset, r14
-	movq_cfi_restore 2*8+\offset, r13
-	movq_cfi_restore 3*8+\offset, r12
-	movq_cfi_restore 4*8+\offset, rbp
-	movq_cfi_restore 5*8+\offset, rbx
+	movq 0*8+\offset(%rsp), %r15
+	movq 1*8+\offset(%rsp), %r14
+	movq 2*8+\offset(%rsp), %r13
+	movq 3*8+\offset(%rsp), %r12
+	movq 4*8+\offset(%rsp), %rbp
+	movq 5*8+\offset(%rsp), %rbx
 .endm
 .macro ZERO_EXTRA_REGS
@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
 	.if \rstor_r11
-	movq_cfi_restore 6*8, r11
+	movq 6*8(%rsp), %r11
 	.endif
 	.if \rstor_r8910
-	movq_cfi_restore 7*8, r10
-	movq_cfi_restore 8*8, r9
-	movq_cfi_restore 9*8, r8
+	movq 7*8(%rsp), %r10
+	movq 8*8(%rsp), %r9
+	movq 9*8(%rsp), %r8
 	.endif
 	.if \rstor_rax
-	movq_cfi_restore 10*8, rax
+	movq 10*8(%rsp), %rax
 	.endif
 	.if \rstor_rcx
-	movq_cfi_restore 11*8, rcx
+	movq 11*8(%rsp), %rcx
 	.endif
 	.if \rstor_rdx
-	movq_cfi_restore 12*8, rdx
+	movq 12*8(%rsp), %rdx
 	.endif
-	movq_cfi_restore 13*8, rsi
-	movq_cfi_restore 14*8, rdi
+	movq 13*8(%rsp), %rsi
+	movq 14*8(%rsp), %rdi
 .endm
 .macro RESTORE_C_REGS
 	RESTORE_C_REGS_HELPER 1,1,1,1,1
@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with
 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
 	addq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
 .endm
 .macro icebp
@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
  */
 .macro SAVE_ALL
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 .endm
 .macro RESTORE_ALL
-	popl_cfi_reg ebx
-	popl_cfi_reg ecx
-	popl_cfi_reg edx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebp
-	popl_cfi_reg eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 .endm
 #endif /* CONFIG_X86_64 */
...
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older version.
- */
-#ifdef CONFIG_AS_CFI
-#define CFI_STARTPROC .cfi_startproc
-#define CFI_ENDPROC .cfi_endproc
-#define CFI_DEF_CFA .cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
-#define CFI_OFFSET .cfi_offset
-#define CFI_REL_OFFSET .cfi_rel_offset
-#define CFI_REGISTER .cfi_register
-#define CFI_RESTORE .cfi_restore
-#define CFI_REMEMBER_STATE .cfi_remember_state
-#define CFI_RESTORE_STATE .cfi_restore_state
-#define CFI_UNDEFINED .cfi_undefined
-#define CFI_ESCAPE .cfi_escape
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME .cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-/*
- * Emit CFI data in .debug_frame sections, not .eh_frame sections.
- * The latter we currently just discard since we don't do DWARF
- * unwinding at runtime. So only the offline DWARF information is
- * useful to anyone. Note we should not use this directive if this
- * file is used in the vDSO assembly, or if vmlinux.lds.S gets
- * changed so it doesn't discard .eh_frame.
- */
-	.cfi_sections .debug_frame
-#endif
-#else
-/*
- * Due to the structure of pre-exisiting code, don't use assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-#define CFI_STARTPROC cfi_ignore
-#define CFI_ENDPROC cfi_ignore
-#define CFI_DEF_CFA cfi_ignore
-#define CFI_DEF_CFA_REGISTER cfi_ignore
-#define CFI_DEF_CFA_OFFSET cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET cfi_ignore
-#define CFI_OFFSET cfi_ignore
-#define CFI_REL_OFFSET cfi_ignore
-#define CFI_REGISTER cfi_ignore
-#define CFI_RESTORE cfi_ignore
-#define CFI_REMEMBER_STATE cfi_ignore
-#define CFI_RESTORE_STATE cfi_ignore
-#define CFI_UNDEFINED cfi_ignore
-#define CFI_ESCAPE cfi_ignore
-#define CFI_SIGNAL_FRAME cfi_ignore
-#endif
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-.macro pushq_cfi reg
-	pushq \reg
-	CFI_ADJUST_CFA_OFFSET 8
-.endm
-.macro pushq_cfi_reg reg
-	pushq %\reg
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET \reg, 0
-.endm
-.macro popq_cfi reg
-	popq \reg
-	CFI_ADJUST_CFA_OFFSET -8
-.endm
-.macro popq_cfi_reg reg
-	popq %\reg
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE \reg
-.endm
-.macro pushfq_cfi
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
-.endm
-.macro popfq_cfi
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
-.endm
-.macro movq_cfi reg offset=0
-	movq %\reg, \offset(%rsp)
-	CFI_REL_OFFSET \reg, \offset
-.endm
-.macro movq_cfi_restore offset reg
-	movq \offset(%rsp), %\reg
-	CFI_RESTORE \reg
-.endm
-#else /*!CONFIG_X86_64*/
-.macro pushl_cfi reg
-	pushl \reg
-	CFI_ADJUST_CFA_OFFSET 4
-.endm
-.macro pushl_cfi_reg reg
-	pushl %\reg
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET \reg, 0
-.endm
-.macro popl_cfi reg
-	popl \reg
-	CFI_ADJUST_CFA_OFFSET -4
-.endm
-.macro popl_cfi_reg reg
-	popl %\reg
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE \reg
-.endm
-.macro pushfl_cfi
-	pushfl
-	CFI_ADJUST_CFA_OFFSET 4
-.endm
-.macro popfl_cfi
-	popfl
-	CFI_ADJUST_CFA_OFFSET -4
-.endm
-.macro movl_cfi reg offset=0
-	movl %\reg, \offset(%esp)
-	CFI_REL_OFFSET \reg, \offset
-.endm
-.macro movl_cfi_restore offset reg
-	movl \offset(%esp), %\reg
-	CFI_RESTORE \reg
-.endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-#endif /* _ASM_X86_DWARF2_H */
...
 #ifdef __ASSEMBLY__
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 /* The annotation hides the frame from the unwinder and makes it look
    like a ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 .macro FRAME
-	__ASM_SIZE(push,_cfi) %__ASM_REG(bp)
-	CFI_REL_OFFSET __ASM_REG(bp), 0
+	__ASM_SIZE(push,) %__ASM_REG(bp)
 	__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
 .endm
 .macro ENDFRAME
-	__ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
-	CFI_RESTORE __ASM_REG(bp)
+	__ASM_SIZE(pop,) %__ASM_REG(bp)
 .endm
 #else
 .macro FRAME
...
@@ -50,7 +50,6 @@
 #include <asm/smp.h>
 #include <asm/page_types.h>
 #include <asm/percpu.h>
-#include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
@@ -113,11 +112,10 @@
 /* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
-	pushl_cfi $0
+	pushl $0
 .endm
 .macro POP_GS pop=0
 	addl $(4 + \pop), %esp
-	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
 .endm
 .macro POP_GS_EX
 .endm
@@ -137,16 +135,13 @@
 #else /* CONFIG_X86_32_LAZY_GS */
 .macro PUSH_GS
-	pushl_cfi %gs
-	/*CFI_REL_OFFSET gs, 0*/
+	pushl %gs
 .endm
 .macro POP_GS pop=0
-98:	popl_cfi %gs
-	/*CFI_RESTORE gs*/
+98:	popl %gs
 	.if \pop <> 0
 	add $\pop, %esp
-	CFI_ADJUST_CFA_OFFSET -\pop
 	.endif
 .endm
 .macro POP_GS_EX
@@ -170,11 +165,9 @@
 .macro GS_TO_REG reg
 	movl %gs, \reg
-	/*CFI_REGISTER gs, \reg*/
 .endm
 .macro REG_TO_PTGS reg
 	movl \reg, PT_GS(%esp)
-	/*CFI_REL_OFFSET gs, PT_GS*/
 .endm
 .macro SET_KERNEL_GS reg
 	movl $(__KERNEL_STACK_CANARY), \reg
@@ -186,26 +179,16 @@
 .macro SAVE_ALL
 	cld
 	PUSH_GS
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0;*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0;*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0;*/
-	pushl_cfi %eax
-	CFI_REL_OFFSET eax, 0
-	pushl_cfi %ebp
-	CFI_REL_OFFSET ebp, 0
-	pushl_cfi %edi
-	CFI_REL_OFFSET edi, 0
-	pushl_cfi %esi
-	CFI_REL_OFFSET esi, 0
-	pushl_cfi %edx
-	CFI_REL_OFFSET edx, 0
-	pushl_cfi %ecx
-	CFI_REL_OFFSET ecx, 0
-	pushl_cfi %ebx
-	CFI_REL_OFFSET ebx, 0
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	movl $(__USER_DS), %edx
 	movl %edx, %ds
 	movl %edx, %es
@@ -215,30 +198,20 @@
 .endm
 .macro RESTORE_INT_REGS
-	popl_cfi %ebx
-	CFI_RESTORE ebx
-	popl_cfi %ecx
-	CFI_RESTORE ecx
-	popl_cfi %edx
-	CFI_RESTORE edx
-	popl_cfi %esi
-	CFI_RESTORE esi
-	popl_cfi %edi
-	CFI_RESTORE edi
-	popl_cfi %ebp
-	CFI_RESTORE ebp
-	popl_cfi %eax
-	CFI_RESTORE eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 .endm
 .macro RESTORE_REGS pop=0
 	RESTORE_INT_REGS
-1:	popl_cfi %ds
-	/*CFI_RESTORE ds;*/
-2:	popl_cfi %es
-	/*CFI_RESTORE es;*/
-3:	popl_cfi %fs
-	/*CFI_RESTORE fs;*/
+1:	popl %ds
+2:	popl %es
+3:	popl %fs
 	POP_GS \pop
 	.pushsection .fixup, "ax"
 4:	movl $0, (%esp)
@@ -254,64 +227,27 @@
 	POP_GS_EX
 .endm
-.macro RING0_INT_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 3*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-.macro RING0_EC_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 4*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-.macro RING0_PTREGS_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
-	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
-	CFI_OFFSET eip, PT_EIP-PT_OLDESP
-	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
-	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
-	CFI_OFFSET eax, PT_EAX-PT_OLDESP
-	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
-	CFI_OFFSET edi, PT_EDI-PT_OLDESP
-	CFI_OFFSET esi, PT_ESI-PT_OLDESP
-	CFI_OFFSET edx, PT_EDX-PT_OLDESP
-	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
-	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
-.endm
 ENTRY(ret_from_fork)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202 # Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202 # Reset kernel eflags
+	popfl
 	jmp syscall_exit
-	CFI_ENDPROC
 END(ret_from_fork)
 ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202 # Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202 # Reset kernel eflags
+	popfl
 	movl PT_EBP(%esp),%eax
 	call *PT_EBX(%esp)
 	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-	CFI_ENDPROC
 ENDPROC(ret_from_kernel_thread)
 /*
@@ -323,7 +259,6 @@ ENDPROC(ret_from_kernel_thread)
 # userspace resumption stub bypassing syscall exit tracing
 	ALIGN
-	RING0_PTREGS_FRAME
 ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
@@ -367,17 +302,12 @@ need_resched:
 	jmp need_resched
 END(resume_kernel)
 #endif
-	CFI_ENDPROC
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
 	# sysenter call handler stub
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 0
-	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_sp0(%esp),%esp
 sysenter_past_esp:
 	/*
@@ -385,14 +315,11 @@ sysenter_past_esp:
 	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
 	 * we immediately enable interrupts at that point anyway.
 	 */
-	pushl_cfi $__USER_DS
-	/*CFI_REL_OFFSET ss, 0*/
-	pushl_cfi %ebp
-	CFI_REL_OFFSET esp, 0
-	pushfl_cfi
+	pushl $__USER_DS
+	pushl %ebp
+	pushfl
 	orl $X86_EFLAGS_IF, (%esp)
-	pushl_cfi $__USER_CS
-	/*CFI_REL_OFFSET cs, 0*/
+	pushl $__USER_CS
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
@@ -401,10 +328,9 @@ sysenter_past_esp:
 	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
 	 * and THREAD_SIZE takes us to the bottom.
 	 */
-	pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-	CFI_REL_OFFSET eip, 0
-	pushl_cfi %eax
+	pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
+	pushl %eax
 	SAVE_ALL
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -453,11 +379,11 @@ sysenter_audit:
 	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
 	movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
 	/* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */
-	pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
-	pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
+	pushl PT_ESI(%esp) /* a3: 5th arg */
+	pushl PT_EDX+4(%esp) /* a2: 4th arg */
 	call __audit_syscall_entry
-	popl_cfi %ecx /* get that remapped edx off the stack */
-	popl_cfi %ecx /* get that remapped esi off the stack */
+	popl %ecx /* get that remapped edx off the stack */
+	popl %ecx /* get that remapped esi off the stack */
 	movl PT_EAX(%esp),%eax /* reload syscall number */
 	jmp sysenter_do_call
@@ -480,7 +406,6 @@ sysexit_audit:
 	jmp sysenter_exit
 #endif
-	CFI_ENDPROC
 .pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
 	jmp 1b
@@ -491,9 +416,8 @@ ENDPROC(ia32_sysenter_target)
 	# system call handler stub
 ENTRY(system_call)
-	RING0_INT_FRAME # can't unwind into user space anyway
 	ASM_CLAC
-	pushl_cfi %eax # save orig_eax
+	pushl %eax # save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	# system call tracing in operation / emulation
@@ -527,7 +451,6 @@ restore_all_notrace:
 	movb PT_CS(%esp), %al
 	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-	CFI_REMEMBER_STATE
 	je ldt_ss # returning to user-space with LDT SS
 #endif
 restore_nocheck:
@@ -543,7 +466,6 @@ ENTRY(iret_exc)
 	_ASM_EXTABLE(irq_return,iret_exc)
 #ifdef CONFIG_X86_ESPFIX32
-	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
 	/*
@@ -577,22 +499,19 @@ ldt_ss:
 	shr $16, %edx
 	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
 	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-	pushl_cfi $__ESPFIX_SS
-	pushl_cfi %eax /* new kernel esp */
+	pushl $__ESPFIX_SS
+	pushl %eax /* new kernel esp */
 	/* Disable interrupts, but do not irqtrace this section: we
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the iret */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss (%esp), %esp /* switch to espfix segment */
-	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
 #endif
-	CFI_ENDPROC
 ENDPROC(system_call)
 	# perform work that needs to be done immediately before resumption
 	ALIGN
-	RING0_PTREGS_FRAME # can't unwind into user space anyway
 work_pending:
 	testb $_TIF_NEED_RESCHED, %cl
 	jz work_notifysig
@@ -634,9 +553,9 @@ work_notifysig: # deal with pending signals and
 #ifdef CONFIG_VM86
 	ALIGN
 work_notifysig_v86:
-	pushl_cfi %ecx # save ti_flags for do_notify_resume
+	pushl %ecx # save ti_flags for do_notify_resume
 	call save_v86_state # %eax contains pt_regs pointer
-	popl_cfi %ecx
+	popl %ecx
 	movl %eax, %esp
 	jmp 1b
 #endif
@@ -666,9 +585,7 @@ syscall_exit_work:
 	call syscall_trace_leave
 	jmp resume_userspace
 END(syscall_exit_work)
-	CFI_ENDPROC
-	RING0_INT_FRAME # can't unwind into user space anyway
 syscall_fault:
 	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
@@ -685,7 +602,6 @@ sysenter_badsys:
 	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(sysenter_badsys)
-	CFI_ENDPROC
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -701,10 +617,9 @@ END(sysenter_badsys)
 	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax /* the adjusted stack pointer */
-	pushl_cfi $__KERNEL_DS
-	pushl_cfi %eax
+	pushl $__KERNEL_DS
+	pushl %eax
 	lss (%esp), %esp /* switch to the normal stack segment */
-	CFI_ADJUST_CFA_OFFSET -8
 #endif
 .endm
 .macro UNWIND_ESPFIX_STACK
@@ -728,13 +643,11 @@ END(sysenter_badsys)
  */
 	.align 8
 ENTRY(irq_entries_start)
-	RING0_INT_FRAME
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-	pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
+	pushl $(~vector+0x80) /* Note: always in signed byte range */
 vector=vector+1
 	jmp common_interrupt
-	CFI_ADJUST_CFA_OFFSET -4
 	.align 8
 .endr
 END(irq_entries_start)
@@ -753,19 +666,16 @@ common_interrupt:
 	call do_IRQ
 	jmp ret_from_intr
 ENDPROC(common_interrupt)
-	CFI_ENDPROC
 #define BUILD_INTERRUPT3(name, nr, fn) \
 ENTRY(name) \
-	RING0_INT_FRAME; \
 	ASM_CLAC; \
-	pushl_cfi $~(nr); \
+	pushl $~(nr); \
 	SAVE_ALL; \
 	TRACE_IRQS_OFF \
 	movl %esp,%eax; \
 	call fn; \
 	jmp ret_from_intr; \
-	CFI_ENDPROC; \
 ENDPROC(name)
@@ -784,37 +694,31 @@ ENDPROC(name)
 #include <asm/entry_arch.h>
 ENTRY(coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_error
+	pushl $0
+	pushl $do_coprocessor_error
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_error)
 ENTRY(simd_coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
+	pushl $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-	ALTERNATIVE "pushl_cfi $do_general_protection", \
+	ALTERNATIVE "pushl $do_general_protection", \
 		    "pushl $do_simd_coprocessor_error", \
 		    X86_FEATURE_XMM
 #else
-	pushl_cfi $do_simd_coprocessor_error
+	pushl $do_simd_coprocessor_error
 #endif
 	jmp error_code
-	CFI_ENDPROC
 END(simd_coprocessor_error)
 ENTRY(device_not_available)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1 # mark this as an int
-	pushl_cfi $do_device_not_available
+	pushl $-1 # mark this as an int
+	pushl $do_device_not_available
 	jmp error_code
-	CFI_ENDPROC
 END(device_not_available)
 #ifdef CONFIG_PARAVIRT
@@ -830,115 +734,89 @@ END(native_irq_enable_sysexit)
 #endif
 ENTRY(overflow)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_overflow
+	pushl $0
+	pushl $do_overflow
 	jmp error_code
-	CFI_ENDPROC
 END(overflow)
 ENTRY(bounds)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_bounds
+	pushl $0
+	pushl $do_bounds
 	jmp error_code
-	CFI_ENDPROC
 END(bounds)
 ENTRY(invalid_op)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_invalid_op
+	pushl $0
+	pushl $do_invalid_op
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_op)
 ENTRY(coprocessor_segment_overrun)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_segment_overrun
+	pushl $0
+	pushl $do_coprocessor_segment_overrun
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_segment_overrun)
 ENTRY(invalid_TSS)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_invalid_TSS
+	pushl $do_invalid_TSS
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_TSS)
 ENTRY(segment_not_present)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_segment_not_present
+	pushl $do_segment_not_present
 	jmp error_code
-	CFI_ENDPROC
 END(segment_not_present)
 ENTRY(stack_segment)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_stack_segment
+	pushl $do_stack_segment
 	jmp error_code
-	CFI_ENDPROC
 END(stack_segment)
 ENTRY(alignment_check)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_alignment_check
+	pushl $do_alignment_check
 	jmp error_code
-	CFI_ENDPROC
 END(alignment_check)
 ENTRY(divide_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0 # no error code
-	pushl_cfi $do_divide_error
+	pushl $0 # no error code
+	pushl $do_divide_error
 	jmp error_code
-	CFI_ENDPROC
 END(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi machine_check_vector
+	pushl $0
+	pushl machine_check_vector
 	jmp error_code
-	CFI_ENDPROC
 END(machine_check)
 #endif
 ENTRY(spurious_interrupt_bug)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_spurious_interrupt_bug
+	pushl $0
+	pushl $do_spurious_interrupt_bug
 	jmp error_code
-	CFI_ENDPROC
 END(spurious_interrupt_bug)
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
 ENTRY(xen_sysenter_target)
-	RING0_INT_FRAME
 	addl $5*4, %esp /* remove xen-provided frame */
-	CFI_ADJUST_CFA_OFFSET -5*4
 	jmp sysenter_past_esp
-	CFI_ENDPROC
 ENTRY(xen_hypervisor_callback)
-	CFI_STARTPROC
-	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -962,7 +840,6 @@ ENTRY(xen_do_upcall)
 	call xen_maybe_preempt_hcall
 #endif
 	jmp ret_from_intr
-	CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
 # Hypervisor uses this for application faults while it executes.
@@ -976,8 +853,7 @@ ENDPROC(xen_hypervisor_callback)
 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 # We distinguish between categories by maintaining a status value in EAX.
 ENTRY(xen_failsafe_callback)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	movl $1,%eax
 1:	mov 4(%esp),%ds
 2:	mov 8(%esp),%es
@@ -986,15 +862,13 @@ ENTRY(xen_failsafe_callback)
 	/* EAX == 0 => Category 1 (Bad segment)
 	   EAX != 0 => Category 2 (Bad IRET) */
 	testl %eax,%eax
-	popl_cfi %eax
+	popl %eax
 	lea 16(%esp),%esp
-	CFI_ADJUST_CFA_OFFSET -16
 	jz 5f
 	jmp iret_exc
-5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+5:	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	jmp ret_from_exception
-	CFI_ENDPROC
 .section .fixup,"ax"
 6:	xorl %eax,%eax
@@ -1195,34 +1069,28 @@ return_to_handler:
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $trace_do_page_fault
+	pushl $trace_do_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(trace_page_fault)
 #endif
 ENTRY(page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_page_fault
+	pushl $do_page_fault
 	ALIGN
 error_code:
 	/* the function address is in %gs's slot on the stack */
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0*/
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	cld
 	movl $(__KERNEL_PERCPU), %ecx
 	movl %ecx, %fs
@@ -1240,7 +1108,6 @@ error_code:
 	movl %esp,%eax # pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(page_fault)
 /*
@@ -1261,29 +1128,24 @@ END(page_fault)
 	jne \ok
 \label:
 	movl TSS_sysenter_sp0 + \offset(%esp), %esp
-	CFI_DEF_CFA esp, 0
-	CFI_UNDEFINED eip
-	pushfl_cfi
-	pushl_cfi $__KERNEL_CS
-	pushl_cfi $sysenter_past_esp
-	CFI_REL_OFFSET eip, 0
+	pushfl
+	pushl $__KERNEL_CS
+	pushl $sysenter_past_esp
 .endm
 ENTRY(debug)
-	RING0_INT_FRAME
 	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
-	pushl_cfi $-1 # mark this as an int
+	pushl $-1 # mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx # error code 0
 	movl %esp,%eax # pt_regs pointer
 	call do_debug
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(debug)
 /*
@@ -1295,45 +1157,40 @@ END(debug)
 * fault happened on the sysenter path.
 */
 ENTRY(nmi)
-	RING0_INT_FRAME
 	ASM_CLAC
 #ifdef CONFIG_X86_ESPFIX32
-	pushl_cfi %eax
+	pushl %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
-	popl_cfi %eax
+	popl %eax
 	je nmi_espfix_stack
 #endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
-	pushl_cfi %eax
+	pushl %eax
 	movl %esp,%eax
 	/* Do not access memory above the end of our stack page,
 	 * it might not exist.
 	 */
 	andl $(THREAD_SIZE-1),%eax
 	cmpl $(THREAD_SIZE-20),%eax
-	popl_cfi %eax
+	popl %eax
 	jae nmi_stack_correct
 	cmpl $ia32_sysenter_target,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
-	/* We have a RING0_INT_FRAME here */
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	xorl %edx,%edx # zero error code
 	movl %esp,%eax # pt_regs pointer
 	call do_nmi
 	jmp restore_all_notrace
-	CFI_ENDPROC
 nmi_stack_fixup:
-	RING0_INT_FRAME
 	FIX_STACK 12, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 nmi_debug_stack_check:
-	/* We have a RING0_INT_FRAME here */
 	cmpw $__KERNEL_CS,16(%esp)
 	jne nmi_stack_correct
 	cmpl $debug,(%esp)
@@ -1345,57 +1202,48 @@ nmi_debug_stack_check:
 #ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
-	/* We have a RING0_INT_FRAME here.
-	 *
+	/*
 	 * create the pointer to lss back
 	 */
-	pushl_cfi %ss
-	pushl_cfi %esp
+	pushl %ss
+	pushl %esp
 	addl $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
-	pushl_cfi 16(%esp)
+	pushl 16(%esp)
 	.endr
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK # %eax == %esp
 	xorl %edx,%edx # zero error code
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp # back to espfix stack
-	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
 #endif
-	CFI_ENDPROC
 END(nmi)
 ENTRY(int3)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1 # mark this as an int
+	pushl $-1 # mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx # zero error code
 	movl %esp,%eax # pt_regs pointer
 	call do_int3
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(int3)
 ENTRY(general_protection)
-	RING0_EC_FRAME
-	pushl_cfi $do_general_protection
+	pushl $do_general_protection
 	jmp error_code
-	CFI_ENDPROC
 END(general_protection)
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_async_page_fault
+	pushl $do_async_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(async_page_fault)
 #endif
...
...@@ -19,8 +19,6 @@ ...@@ -19,8 +19,6 @@
* at the top of the kernel process stack. * at the top of the kernel process stack.
* *
* Some macro usage: * Some macro usage:
* - CFI macros are used to generate dwarf2 unwind information for better
* backtraces. They don't change any code.
* - ENTRY/END Define functions in the symbol table. * - ENTRY/END Define functions in the symbol table.
* - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
* - idtentry - Define exception entry points. * - idtentry - Define exception entry points.
...@@ -30,7 +28,6 @@ ...@@ -30,7 +28,6 @@
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h> #include <asm/calling.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/msr.h> #include <asm/msr.h>
...@@ -112,61 +109,6 @@ ENDPROC(native_usergs_sysret64) ...@@ -112,61 +109,6 @@ ENDPROC(native_usergs_sysret64)
# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
#endif #endif
/*
* empty frame
*/
.macro EMPTY_FRAME start=1 offset=0
.if \start
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,8+\offset
.else
CFI_DEF_CFA_OFFSET 8+\offset
.endif
.endm
/*
* initial frame state for interrupts (and exceptions without error code)
*/
.macro INTR_FRAME start=1 offset=0
EMPTY_FRAME \start, 5*8+\offset
/*CFI_REL_OFFSET ss, 4*8+\offset*/
CFI_REL_OFFSET rsp, 3*8+\offset
/*CFI_REL_OFFSET rflags, 2*8+\offset*/
/*CFI_REL_OFFSET cs, 1*8+\offset*/
CFI_REL_OFFSET rip, 0*8+\offset
.endm
/*
* initial frame state for exceptions with error code (and interrupts
* with vector already pushed)
*/
.macro XCPT_FRAME start=1 offset=0
INTR_FRAME \start, 1*8+\offset
.endm
/*
* frame that enables passing a complete pt_regs to a C function.
*/
.macro DEFAULT_FRAME start=1 offset=0
XCPT_FRAME \start, ORIG_RAX+\offset
CFI_REL_OFFSET rdi, RDI+\offset
CFI_REL_OFFSET rsi, RSI+\offset
CFI_REL_OFFSET rdx, RDX+\offset
CFI_REL_OFFSET rcx, RCX+\offset
CFI_REL_OFFSET rax, RAX+\offset
CFI_REL_OFFSET r8, R8+\offset
CFI_REL_OFFSET r9, R9+\offset
CFI_REL_OFFSET r10, R10+\offset
CFI_REL_OFFSET r11, R11+\offset
CFI_REL_OFFSET rbx, RBX+\offset
CFI_REL_OFFSET rbp, RBP+\offset
CFI_REL_OFFSET r12, R12+\offset
CFI_REL_OFFSET r13, R13+\offset
CFI_REL_OFFSET r14, R14+\offset
CFI_REL_OFFSET r15, R15+\offset
.endm
/* /*
* 64bit SYSCALL instruction entry. Up to 6 arguments in registers. * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
* *
...@@ -196,12 +138,6 @@ ENDPROC(native_usergs_sysret64) ...@@ -196,12 +138,6 @@ ENDPROC(native_usergs_sysret64)
*/ */
ENTRY(system_call) ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -219,8 +155,8 @@ GLOBAL(system_call_after_swapgs) ...@@ -219,8 +155,8 @@ GLOBAL(system_call_after_swapgs)
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq_cfi $__USER_DS /* pt_regs->ss */ pushq $__USER_DS /* pt_regs->ss */
pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
/* /*
* Re-enable interrupts. * Re-enable interrupts.
* We use 'rsp_scratch' as a scratch space, hence irq-off block above * We use 'rsp_scratch' as a scratch space, hence irq-off block above
...@@ -229,22 +165,20 @@ GLOBAL(system_call_after_swapgs) ...@@ -229,22 +165,20 @@ GLOBAL(system_call_after_swapgs)
* with using rsp_scratch: * with using rsp_scratch:
*/ */
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %r11 /* pt_regs->flags */ pushq %r11 /* pt_regs->flags */
pushq_cfi $__USER_CS /* pt_regs->cs */ pushq $__USER_CS /* pt_regs->cs */
pushq_cfi %rcx /* pt_regs->ip */ pushq %rcx /* pt_regs->ip */
CFI_REL_OFFSET rip,0 pushq %rax /* pt_regs->orig_ax */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */
pushq_cfi_reg rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */
pushq_cfi $-ENOSYS /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */
pushq_cfi_reg r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */
pushq_cfi_reg r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */
pushq_cfi_reg r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */
pushq_cfi_reg r11 /* pt_regs->r11 */
sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 6*8
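
For orientation, the pt_regs image the push sequence above has just built, sketched from the pushes themselves (highest address first; a reading aid, not authoritative):

/*
 *	ss, sp, flags, cs, ip	- iret-style hardware frame
 *	orig_ax			- the pushq %rax above
 *	di si dx cx ax r8-r11	- C-clobbered registers, pushed above
 *	bx bp r12-r15		- not saved: the hole left by sub $(6*8),%rsp
 *				  (%rsp now points at the base of pt_regs)
 */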
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz tracesys jnz tracesys
...@@ -282,13 +216,9 @@ system_call_fastpath: ...@@ -282,13 +216,9 @@ system_call_fastpath:
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
CFI_REMEMBER_STATE
RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_EXCEPT_RCX_R11
movq RIP(%rsp),%rcx movq RIP(%rsp),%rcx
CFI_REGISTER rip,rcx
movq EFLAGS(%rsp),%r11 movq EFLAGS(%rsp),%r11
/*CFI_REGISTER rflags,r11*/
movq RSP(%rsp),%rsp movq RSP(%rsp),%rsp
/* /*
* 64bit SYSRET restores rip from rcx, * 64bit SYSRET restores rip from rcx,
...@@ -307,8 +237,6 @@ system_call_fastpath: ...@@ -307,8 +237,6 @@ system_call_fastpath:
*/ */
USERGS_SYSRET64 USERGS_SYSRET64
CFI_RESTORE_STATE
/* Do syscall entry tracing */ /* Do syscall entry tracing */
tracesys: tracesys:
movq %rsp, %rdi movq %rsp, %rdi
...@@ -374,9 +302,9 @@ int_careful: ...@@ -374,9 +302,9 @@ int_careful:
jnc int_very_careful jnc int_very_careful
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi pushq %rdi
SCHEDULE_USER SCHEDULE_USER
popq_cfi %rdi popq %rdi
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
jmp int_with_check jmp int_with_check
...@@ -389,10 +317,10 @@ int_very_careful: ...@@ -389,10 +317,10 @@ int_very_careful:
/* Check for syscall exit trace */ /* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal jz int_signal
pushq_cfi %rdi pushq %rdi
leaq 8(%rsp),%rdi # &ptregs -> arg1 leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave call syscall_trace_leave
popq_cfi %rdi popq %rdi
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
jmp int_restore_rest jmp int_restore_rest
...@@ -475,27 +403,21 @@ syscall_return: ...@@ -475,27 +403,21 @@ syscall_return:
* perf profiles. Nothing jumps here. * perf profiles. Nothing jumps here.
*/ */
syscall_return_via_sysret: syscall_return_via_sysret:
CFI_REMEMBER_STATE
/* rcx and r11 are already restored (see code above) */ /* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp),%rsp movq RSP(%rsp),%rsp
USERGS_SYSRET64 USERGS_SYSRET64
CFI_RESTORE_STATE
opportunistic_sysret_failed: opportunistic_sysret_failed:
SWAPGS SWAPGS
jmp restore_c_regs_and_iret jmp restore_c_regs_and_iret
CFI_ENDPROC
END(system_call) END(system_call)
.macro FORK_LIKE func .macro FORK_LIKE func
ENTRY(stub_\func) ENTRY(stub_\func)
CFI_STARTPROC
DEFAULT_FRAME 0, 8 /* offset 8: return address */
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
jmp sys_\func jmp sys_\func
CFI_ENDPROC
END(stub_\func) END(stub_\func)
.endm .endm
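
Post-patch, a FORK_LIKE stub is just two instructions; for instance FORK_LIKE vfork (instantiated below) expands to roughly:

ENTRY(stub_vfork)
	SAVE_EXTRA_REGS 8	/* complete pt_regs; 8 skips the return address */
	jmp sys_vfork
END(stub_vfork)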
...@@ -504,8 +426,6 @@ END(stub_\func) ...@@ -504,8 +426,6 @@ END(stub_\func)
FORK_LIKE vfork FORK_LIKE vfork
ENTRY(stub_execve) ENTRY(stub_execve)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call sys_execve call sys_execve
return_from_execve: return_from_execve:
testl %eax, %eax testl %eax, %eax
...@@ -515,11 +435,9 @@ return_from_execve: ...@@ -515,11 +435,9 @@ return_from_execve:
1: 1:
/* must use IRET code path (pt_regs->cs may have changed) */ /* must use IRET code path (pt_regs->cs may have changed) */
addq $8, %rsp addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
ZERO_EXTRA_REGS ZERO_EXTRA_REGS
movq %rax,RAX(%rsp) movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_execve) END(stub_execve)
/* /*
* Remaining execve stubs are only 7 bytes long. * Remaining execve stubs are only 7 bytes long.
...@@ -527,32 +445,23 @@ END(stub_execve) ...@@ -527,32 +445,23 @@ END(stub_execve)
*/ */
.align 8 .align 8
GLOBAL(stub_execveat) GLOBAL(stub_execveat)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call sys_execveat call sys_execveat
jmp return_from_execve jmp return_from_execve
CFI_ENDPROC
END(stub_execveat) END(stub_execveat)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
.align 8 .align 8
GLOBAL(stub_x32_execve) GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve) GLOBAL(stub32_execve)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call compat_sys_execve call compat_sys_execve
jmp return_from_execve jmp return_from_execve
CFI_ENDPROC
END(stub32_execve) END(stub32_execve)
END(stub_x32_execve) END(stub_x32_execve)
.align 8 .align 8
GLOBAL(stub_x32_execveat) GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat) GLOBAL(stub32_execveat)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call compat_sys_execveat call compat_sys_execveat
jmp return_from_execve jmp return_from_execve
CFI_ENDPROC
END(stub32_execveat) END(stub32_execveat)
END(stub_x32_execveat) END(stub_x32_execveat)
#endif #endif
...@@ -562,8 +471,6 @@ END(stub_x32_execveat) ...@@ -562,8 +471,6 @@ END(stub_x32_execveat)
* This cannot be done with SYSRET, so use the IRET return path instead. * This cannot be done with SYSRET, so use the IRET return path instead.
*/ */
ENTRY(stub_rt_sigreturn) ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
/* /*
* SAVE_EXTRA_REGS result is not normally needed: * SAVE_EXTRA_REGS result is not normally needed:
* sigreturn overwrites all pt_regs->GPREGS. * sigreturn overwrites all pt_regs->GPREGS.
...@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn) ...@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
call sys_rt_sigreturn call sys_rt_sigreturn
return_from_stub: return_from_stub:
addq $8, %rsp addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
movq %rax,RAX(%rsp) movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_rt_sigreturn) END(stub_rt_sigreturn)
#ifdef CONFIG_X86_X32_ABI #ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn) ENTRY(stub_x32_rt_sigreturn)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
call sys32_x32_rt_sigreturn call sys32_x32_rt_sigreturn
jmp return_from_stub jmp return_from_stub
CFI_ENDPROC
END(stub_x32_rt_sigreturn) END(stub_x32_rt_sigreturn)
#endif #endif
...@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn) ...@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
* rdi: prev task we switched from * rdi: prev task we switched from
*/ */
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
DEFAULT_FRAME
LOCK ; btr $TIF_FORK,TI_flags(%r8) LOCK ; btr $TIF_FORK,TI_flags(%r8)
pushq_cfi $0x0002 pushq $0x0002
popfq_cfi # reset kernel eflags popfq # reset kernel eflags
call schedule_tail # rdi: 'prev' task parameter call schedule_tail # rdi: 'prev' task parameter
...@@ -628,7 +529,6 @@ ENTRY(ret_from_fork) ...@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
movl $0, RAX(%rsp) movl $0, RAX(%rsp)
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC
END(ret_from_fork) END(ret_from_fork)
/* /*
...@@ -637,16 +537,13 @@ END(ret_from_fork) ...@@ -637,16 +537,13 @@ END(ret_from_fork)
*/ */
.align 8 .align 8
ENTRY(irq_entries_start) ENTRY(irq_entries_start)
INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ pushq $(~vector+0x80) /* Note: always in signed byte range */
vector=vector+1 vector=vector+1
jmp common_interrupt jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -8
.align 8 .align 8
.endr .endr
CFI_ENDPROC
END(irq_entries_start) END(irq_entries_start)
/* /*
...@@ -688,17 +585,7 @@ END(irq_entries_start) ...@@ -688,17 +585,7 @@ END(irq_entries_start)
movq %rsp, %rsi movq %rsp, %rsi
incl PER_CPU_VAR(irq_count) incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
CFI_DEF_CFA_REGISTER rsi
pushq %rsi pushq %rsi
/*
* For debugger:
* "CFA (Current Frame Address) is the value on stack + offset"
*/
CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
0x77 /* DW_OP_breg7 (rsp) */, 0, \
0x06 /* DW_OP_deref */, \
0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
0x22 /* DW_OP_plus */
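
Decoded, the escape above handed the unwinder this DWARF expression (paraphrased for illustration):

/*
 *	DW_OP_breg7 0	- push %rsp + 0
 *	DW_OP_deref	- load *(%rsp): the old stack pointer saved by
 *			  the pushq %rsi just above
 *	DW_OP_const1u N	- push N = SIZEOF_PTREGS - RBP
 *	DW_OP_plus	- CFA = *(%rsp) + SIZEOF_PTREGS - RBP
 *
 * i.e. follow the saved pre-switch %rsp and add the distance to the end
 * of pt_regs, recovering the interrupted frame's CFA across the switch
 * to the IRQ stack.
 */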
/* We entered an interrupt context - irqs are off: */ /* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -711,7 +598,6 @@ END(irq_entries_start) ...@@ -711,7 +598,6 @@ END(irq_entries_start)
*/ */
.p2align CONFIG_X86_L1_CACHE_SHIFT .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt: common_interrupt:
XCPT_FRAME
ASM_CLAC ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ interrupt do_IRQ
...@@ -723,11 +609,8 @@ ret_from_intr: ...@@ -723,11 +609,8 @@ ret_from_intr:
/* Restore saved previous stack */ /* Restore saved previous stack */
popq %rsi popq %rsi
CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
/* return code expects complete pt_regs - adjust rsp accordingly: */ /* return code expects complete pt_regs - adjust rsp accordingly: */
leaq -RBP(%rsi),%rsp leaq -RBP(%rsi),%rsp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET RBP
testb $3, CS(%rsp) testb $3, CS(%rsp)
jz retint_kernel jz retint_kernel
...@@ -743,7 +626,6 @@ retint_check: ...@@ -743,7 +626,6 @@ retint_check:
LOCKDEP_SYS_EXIT_IRQ LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx movl TI_flags(%rcx),%edx
andl %edi,%edx andl %edi,%edx
CFI_REMEMBER_STATE
jnz retint_careful jnz retint_careful
retint_swapgs: /* return to user-space */ retint_swapgs: /* return to user-space */
...@@ -807,8 +689,8 @@ native_irq_return_iret: ...@@ -807,8 +689,8 @@ native_irq_return_iret:
#ifdef CONFIG_X86_ESPFIX64 #ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt: native_irq_return_ldt:
pushq_cfi %rax pushq %rax
pushq_cfi %rdi pushq %rdi
SWAPGS SWAPGS
movq PER_CPU_VAR(espfix_waddr),%rdi movq PER_CPU_VAR(espfix_waddr),%rdi
movq %rax,(0*8)(%rdi) /* RAX */ movq %rax,(0*8)(%rdi) /* RAX */
...@@ -823,24 +705,23 @@ native_irq_return_ldt: ...@@ -823,24 +705,23 @@ native_irq_return_ldt:
movq (5*8)(%rsp),%rax /* RSP */ movq (5*8)(%rsp),%rax /* RSP */
movq %rax,(4*8)(%rdi) movq %rax,(4*8)(%rdi)
andl $0xffff0000,%eax andl $0xffff0000,%eax
popq_cfi %rdi popq %rdi
orq PER_CPU_VAR(espfix_stack),%rax orq PER_CPU_VAR(espfix_stack),%rax
SWAPGS SWAPGS
movq %rax,%rsp movq %rax,%rsp
popq_cfi %rax popq %rax
jmp native_irq_return_iret jmp native_irq_return_iret
#endif #endif
/* edi: workmask, edx: work */ /* edi: workmask, edx: work */
retint_careful: retint_careful:
CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx bt $TIF_NEED_RESCHED,%edx
jnc retint_signal jnc retint_signal
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi pushq %rdi
SCHEDULE_USER SCHEDULE_USER
popq_cfi %rdi popq %rdi
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -862,7 +743,6 @@ retint_signal: ...@@ -862,7 +743,6 @@ retint_signal:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
jmp retint_with_reschedule jmp retint_with_reschedule
CFI_ENDPROC
END(common_interrupt) END(common_interrupt)
/* /*
...@@ -870,13 +750,11 @@ END(common_interrupt) ...@@ -870,13 +750,11 @@ END(common_interrupt)
*/ */
.macro apicinterrupt3 num sym do_sym .macro apicinterrupt3 num sym do_sym
ENTRY(\sym) ENTRY(\sym)
INTR_FRAME
ASM_CLAC ASM_CLAC
pushq_cfi $~(\num) pushq $~(\num)
.Lcommon_\sym: .Lcommon_\sym:
interrupt \do_sym interrupt \do_sym
jmp ret_from_intr jmp ret_from_intr
CFI_ENDPROC
END(\sym) END(\sym)
.endm .endm
...@@ -959,24 +837,17 @@ ENTRY(\sym) ...@@ -959,24 +837,17 @@ ENTRY(\sym)
.error "using shift_ist requires paranoid=1" .error "using shift_ist requires paranoid=1"
.endif .endif
.if \has_error_code
XCPT_FRAME
.else
INTR_FRAME
.endif
ASM_CLAC ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME
.ifeq \has_error_code .ifeq \has_error_code
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif .endif
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
.if \paranoid .if \paranoid
.if \paranoid == 1 .if \paranoid == 1
CFI_REMEMBER_STATE
testb $3, CS(%rsp) /* If coming from userspace, switch */ testb $3, CS(%rsp) /* If coming from userspace, switch */
jnz 1f /* stacks. */ jnz 1f /* stacks. */
.endif .endif
...@@ -986,8 +857,6 @@ ENTRY(\sym) ...@@ -986,8 +857,6 @@ ENTRY(\sym)
.endif .endif
/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
DEFAULT_FRAME 0
.if \paranoid .if \paranoid
.if \shift_ist != -1 .if \shift_ist != -1
TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
...@@ -1023,7 +892,6 @@ ENTRY(\sym) ...@@ -1023,7 +892,6 @@ ENTRY(\sym)
.endif .endif
.if \paranoid == 1 .if \paranoid == 1
CFI_RESTORE_STATE
/* /*
* Paranoid entry from userspace. Switch stacks and treat it * Paranoid entry from userspace. Switch stacks and treat it
* as a normal entry. This means that paranoid handlers * as a normal entry. This means that paranoid handlers
...@@ -1032,7 +900,6 @@ ENTRY(\sym) ...@@ -1032,7 +900,6 @@ ENTRY(\sym)
1: 1:
call error_entry call error_entry
DEFAULT_FRAME 0
movq %rsp,%rdi /* pt_regs pointer */ movq %rsp,%rdi /* pt_regs pointer */
call sync_regs call sync_regs
...@@ -1051,8 +918,6 @@ ENTRY(\sym) ...@@ -1051,8 +918,6 @@ ENTRY(\sym)
jmp error_exit /* %ebx: no swapgs flag */ jmp error_exit /* %ebx: no swapgs flag */
.endif .endif
CFI_ENDPROC
END(\sym) END(\sym)
.endm .endm
...@@ -1085,17 +950,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 ...@@ -1085,17 +950,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
/* Reload gs selector with exception handling */ /* Reload gs selector with exception handling */
/* edi: new selector */ /* edi: new selector */
ENTRY(native_load_gs_index) ENTRY(native_load_gs_index)
CFI_STARTPROC pushfq
pushfq_cfi
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS SWAPGS
gs_change: gs_change:
movl %edi,%gs movl %edi,%gs
2: mfence /* workaround */ 2: mfence /* workaround */
SWAPGS SWAPGS
popfq_cfi popfq
ret ret
CFI_ENDPROC
END(native_load_gs_index) END(native_load_gs_index)
_ASM_EXTABLE(gs_change,bad_gs) _ASM_EXTABLE(gs_change,bad_gs)
...@@ -1110,22 +973,15 @@ bad_gs: ...@@ -1110,22 +973,15 @@ bad_gs:
/* Call softirq on interrupt stack. Interrupts are off. */ /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack) ENTRY(do_softirq_own_stack)
CFI_STARTPROC pushq %rbp
pushq_cfi %rbp
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
incl PER_CPU_VAR(irq_count) incl PER_CPU_VAR(irq_count)
cmove PER_CPU_VAR(irq_stack_ptr),%rsp cmove PER_CPU_VAR(irq_stack_ptr),%rsp
push %rbp # backlink for old unwinder push %rbp # backlink for old unwinder
call __do_softirq call __do_softirq
leaveq leaveq
CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
ret ret
CFI_ENDPROC
END(do_softirq_own_stack) END(do_softirq_own_stack)
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
...@@ -1145,28 +1001,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 ...@@ -1145,28 +1001,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* activation and restart the handler using the previous one. * activation and restart the handler using the previous one.
*/ */
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *) ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
CFI_STARTPROC
/* /*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
 * will see the correct pointer to the pt_regs. * will see the correct pointer to the pt_regs.
 */ */
movq %rdi, %rsp # we don't return, adjust the stack frame movq %rdi, %rsp # we don't return, adjust the stack frame
CFI_ENDPROC
DEFAULT_FRAME
11: incl PER_CPU_VAR(irq_count) 11: incl PER_CPU_VAR(irq_count)
movq %rsp,%rbp movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
pushq %rbp # backlink for old unwinder pushq %rbp # backlink for old unwinder
call xen_evtchn_do_upcall call xen_evtchn_do_upcall
popq %rsp popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT #ifndef CONFIG_PREEMPT
call xen_maybe_preempt_hcall call xen_maybe_preempt_hcall
#endif #endif
jmp error_exit jmp error_exit
CFI_ENDPROC
END(xen_do_hypervisor_callback) END(xen_do_hypervisor_callback)
/* /*
...@@ -1183,16 +1033,8 @@ END(xen_do_hypervisor_callback) ...@@ -1183,16 +1033,8 @@ END(xen_do_hypervisor_callback)
* with its current contents: any discrepancy means we are in category 1. * with its current contents: any discrepancy means we are in category 1.
*/ */
ENTRY(xen_failsafe_callback) ENTRY(xen_failsafe_callback)
INTR_FRAME 1 (6*8)
/*CFI_REL_OFFSET gs,GS*/
/*CFI_REL_OFFSET fs,FS*/
/*CFI_REL_OFFSET es,ES*/
/*CFI_REL_OFFSET ds,DS*/
CFI_REL_OFFSET r11,8
CFI_REL_OFFSET rcx,0
movl %ds,%ecx movl %ds,%ecx
cmpw %cx,0x10(%rsp) cmpw %cx,0x10(%rsp)
CFI_REMEMBER_STATE
jne 1f jne 1f
movl %es,%ecx movl %es,%ecx
cmpw %cx,0x18(%rsp) cmpw %cx,0x18(%rsp)
...@@ -1205,29 +1047,21 @@ ENTRY(xen_failsafe_callback) ...@@ -1205,29 +1047,21 @@ ENTRY(xen_failsafe_callback)
jne 1f jne 1f
/* All segments match their saved values => Category 2 (Bad IRET). */ /* All segments match their saved values => Category 2 (Bad IRET). */
movq (%rsp),%rcx movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11 movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30 pushq $0 /* RIP */
pushq_cfi $0 /* RIP */ pushq %r11
pushq_cfi %r11 pushq %rcx
pushq_cfi %rcx
jmp general_protection jmp general_protection
CFI_RESTORE_STATE
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp),%rcx movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11 movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30 pushq $-1 /* orig_ax = -1 => not a system call */
pushq_cfi $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS SAVE_C_REGS
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
jmp error_exit jmp error_exit
CFI_ENDPROC
END(xen_failsafe_callback) END(xen_failsafe_callback)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
...@@ -1263,7 +1097,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector( ...@@ -1263,7 +1097,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/ */
ENTRY(paranoid_entry) ENTRY(paranoid_entry)
XCPT_FRAME 1 15*8
cld cld
SAVE_C_REGS 8 SAVE_C_REGS 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
...@@ -1275,7 +1108,6 @@ ENTRY(paranoid_entry) ...@@ -1275,7 +1108,6 @@ ENTRY(paranoid_entry)
SWAPGS SWAPGS
xorl %ebx,%ebx xorl %ebx,%ebx
1: ret 1: ret
CFI_ENDPROC
END(paranoid_entry) END(paranoid_entry)
/* /*
...@@ -1290,7 +1122,6 @@ END(paranoid_entry) ...@@ -1290,7 +1122,6 @@ END(paranoid_entry)
*/ */
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(paranoid_exit) ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */ testl %ebx,%ebx /* swapgs needed? */
...@@ -1305,7 +1136,6 @@ paranoid_exit_restore: ...@@ -1305,7 +1136,6 @@ paranoid_exit_restore:
RESTORE_C_REGS RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8 REMOVE_PT_GPREGS_FROM_STACK 8
INTERRUPT_RETURN INTERRUPT_RETURN
CFI_ENDPROC
END(paranoid_exit) END(paranoid_exit)
/* /*
...@@ -1313,7 +1143,6 @@ END(paranoid_exit) ...@@ -1313,7 +1143,6 @@ END(paranoid_exit)
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/ */
ENTRY(error_entry) ENTRY(error_entry)
XCPT_FRAME 1 15*8
cld cld
SAVE_C_REGS 8 SAVE_C_REGS 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
...@@ -1333,7 +1162,6 @@ error_sti: ...@@ -1333,7 +1162,6 @@ error_sti:
* for these here too. * for these here too.
*/ */
error_kernelspace: error_kernelspace:
CFI_REL_OFFSET rcx, RCX+8
incl %ebx incl %ebx
leaq native_irq_return_iret(%rip),%rcx leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp) cmpq %rcx,RIP+8(%rsp)
...@@ -1357,13 +1185,11 @@ error_bad_iret: ...@@ -1357,13 +1185,11 @@ error_bad_iret:
mov %rax,%rsp mov %rax,%rsp
decl %ebx /* Return to usergs */ decl %ebx /* Return to usergs */
jmp error_sti jmp error_sti
CFI_ENDPROC
END(error_entry) END(error_entry)
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit) ENTRY(error_exit)
DEFAULT_FRAME
movl %ebx,%eax movl %ebx,%eax
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
...@@ -1377,12 +1203,10 @@ ENTRY(error_exit) ...@@ -1377,12 +1203,10 @@ ENTRY(error_exit)
andl %edi,%edx andl %edi,%edx
jnz retint_careful jnz retint_careful
jmp retint_swapgs jmp retint_swapgs
CFI_ENDPROC
END(error_exit) END(error_exit)
/* Runs on exception stack */ /* Runs on exception stack */
ENTRY(nmi) ENTRY(nmi)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME
/* /*
* We allow breakpoints in NMIs. If a breakpoint occurs, then * We allow breakpoints in NMIs. If a breakpoint occurs, then
...@@ -1417,8 +1241,7 @@ ENTRY(nmi) ...@@ -1417,8 +1241,7 @@ ENTRY(nmi)
*/ */
/* Use %rdx as our temp variable throughout */ /* Use %rdx as our temp variable throughout */
pushq_cfi %rdx pushq %rdx
CFI_REL_OFFSET rdx, 0
/* /*
* If %cs was not the kernel segment, then the NMI triggered in user * If %cs was not the kernel segment, then the NMI triggered in user
...@@ -1452,8 +1275,6 @@ ENTRY(nmi) ...@@ -1452,8 +1275,6 @@ ENTRY(nmi)
jb first_nmi jb first_nmi
/* Ah, it is within the NMI stack, treat it as nested */ /* Ah, it is within the NMI stack, treat it as nested */
CFI_REMEMBER_STATE
nested_nmi: nested_nmi:
/* /*
* Do nothing if we interrupted the fixup in repeat_nmi. * Do nothing if we interrupted the fixup in repeat_nmi.
...@@ -1471,26 +1292,22 @@ nested_nmi: ...@@ -1471,26 +1292,22 @@ nested_nmi:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */ /* Set up the interrupted NMIs stack to jump to repeat_nmi */
leaq -1*8(%rsp), %rdx leaq -1*8(%rsp), %rdx
movq %rdx, %rsp movq %rdx, %rsp
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS pushq $__KERNEL_DS
pushq_cfi %rdx pushq %rdx
pushfq_cfi pushfq
pushq_cfi $__KERNEL_CS pushq $__KERNEL_CS
pushq_cfi $repeat_nmi pushq $repeat_nmi
/* Put stack back */ /* Put stack back */
addq $(6*8), %rsp addq $(6*8), %rsp
CFI_ADJUST_CFA_OFFSET -6*8
nested_nmi_out: nested_nmi_out:
popq_cfi %rdx popq %rdx
CFI_RESTORE rdx
/* No need to check faults here */ /* No need to check faults here */
INTERRUPT_RETURN INTERRUPT_RETURN
CFI_RESTORE_STATE
first_nmi: first_nmi:
/* /*
* Because nested NMIs will use the pushed location that we * Because nested NMIs will use the pushed location that we
...@@ -1529,22 +1346,19 @@ first_nmi: ...@@ -1529,22 +1346,19 @@ first_nmi:
*/ */
/* Do not pop rdx, nested NMIs will corrupt that part of the stack */ /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
movq (%rsp), %rdx movq (%rsp), %rdx
CFI_RESTORE rdx
/* Set the NMI executing variable on the stack. */ /* Set the NMI executing variable on the stack. */
pushq_cfi $1 pushq $1
/* /*
* Leave room for the "copied" frame * Leave room for the "copied" frame
*/ */
subq $(5*8), %rsp subq $(5*8), %rsp
CFI_ADJUST_CFA_OFFSET 5*8
/* Copy the stack frame to the Saved frame */ /* Copy the stack frame to the Saved frame */
.rept 5 .rept 5
pushq_cfi 11*8(%rsp) pushq 11*8(%rsp)
.endr .endr
CFI_DEF_CFA_OFFSET 5*8
/* Everything up to here is safe from nested NMIs */ /* Everything up to here is safe from nested NMIs */
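
A rough picture of the stack the pushes above have assembled, reconstructed from the code itself (the full layout comment in the surrounding, elided source is the authoritative version):

/*
 *	+----------------------------+
 *	| original SS                |
 *	| original return RSP        |
 *	| original RFLAGS            |
 *	| original CS                |
 *	| original RIP               |
 *	+----------------------------+
 *	| temp storage for rdx       |
 *	+----------------------------+
 *	| "NMI executing" variable   |  <- the pushq $1 above
 *	+----------------------------+
 *	| copied iret frame, 5 words |  <- room from subq $(5*8),%rsp
 *	+----------------------------+
 *	| saved iret frame, 5 words  |  <- the .rept 5 pushes; %rsp
 *	+----------------------------+
 */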
...@@ -1567,12 +1381,10 @@ repeat_nmi: ...@@ -1567,12 +1381,10 @@ repeat_nmi:
/* Make another copy, this one may be modified by nested NMIs */ /* Make another copy, this one may be modified by nested NMIs */
addq $(10*8), %rsp addq $(10*8), %rsp
CFI_ADJUST_CFA_OFFSET -10*8
.rept 5 .rept 5
pushq_cfi -6*8(%rsp) pushq -6*8(%rsp)
.endr .endr
subq $(5*8), %rsp subq $(5*8), %rsp
CFI_DEF_CFA_OFFSET 5*8
end_repeat_nmi: end_repeat_nmi:
/* /*
...@@ -1580,7 +1392,7 @@ end_repeat_nmi: ...@@ -1580,7 +1392,7 @@ end_repeat_nmi:
* NMI if the first NMI took an exception and reset our iret stack * NMI if the first NMI took an exception and reset our iret stack
* so that we repeat another NMI. * so that we repeat another NMI.
*/ */
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ pushq $-1 /* ORIG_RAX: no syscall to restart */
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
/* /*
...@@ -1591,7 +1403,6 @@ end_repeat_nmi: ...@@ -1591,7 +1403,6 @@ end_repeat_nmi:
* exceptions might do. * exceptions might do.
*/ */
call paranoid_entry call paranoid_entry
DEFAULT_FRAME 0
/* /*
* Save off the CR2 register. If we take a page fault in the NMI then * Save off the CR2 register. If we take a page fault in the NMI then
...@@ -1628,13 +1439,10 @@ nmi_restore: ...@@ -1628,13 +1439,10 @@ nmi_restore:
/* Clear the NMI executing stack variable */ /* Clear the NMI executing stack variable */
movq $0, 5*8(%rsp) movq $0, 5*8(%rsp)
jmp irq_return jmp irq_return
CFI_ENDPROC
END(nmi) END(nmi)
ENTRY(ignore_sysret) ENTRY(ignore_sysret)
CFI_STARTPROC
mov $-ENOSYS,%eax mov $-ENOSYS,%eax
sysret sysret
CFI_ENDPROC
END(ignore_sysret) END(ignore_sysret)
...@@ -11,26 +11,23 @@ ...@@ -11,26 +11,23 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
/* if you want SMP support, implement these with real spinlocks */ /* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg .macro LOCK reg
pushfl_cfi pushfl
cli cli
.endm .endm
.macro UNLOCK reg .macro UNLOCK reg
popfl_cfi popfl
.endm .endm
#define BEGIN(op) \ #define BEGIN(op) \
.macro endp; \ .macro endp; \
CFI_ENDPROC; \
ENDPROC(atomic64_##op##_386); \ ENDPROC(atomic64_##op##_386); \
.purgem endp; \ .purgem endp; \
.endm; \ .endm; \
ENTRY(atomic64_##op##_386); \ ENTRY(atomic64_##op##_386); \
CFI_STARTPROC; \
LOCK v; LOCK v;
#define ENDP endp #define ENDP endp
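
BEGIN defines a one-shot inner macro and opens the function, so with the CFI lines gone an instantiation boils down to this (illustrative expansion for the read op; the body, UNLOCK and ret come from each user of the macro):

ENTRY(atomic64_read_386)
	pushfl				/* LOCK v: pseudo-atomicity via irq-off */
	cli
	/* ... op body, ending in UNLOCK (popfl) and ret ... */
ENDPROC(atomic64_read_386)		/* emitted by ENDP via the self-purging endp macro */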
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
.macro read64 reg .macro read64 reg
movl %ebx, %eax movl %ebx, %eax
...@@ -22,16 +21,11 @@ ...@@ -22,16 +21,11 @@
.endm .endm
ENTRY(atomic64_read_cx8) ENTRY(atomic64_read_cx8)
CFI_STARTPROC
read64 %ecx read64 %ecx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_read_cx8) ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8) ENTRY(atomic64_set_cx8)
CFI_STARTPROC
1: 1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes /* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */ * are atomic on 586 and newer */
...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8) ...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_set_cx8) ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8) ENTRY(atomic64_xchg_cx8)
CFI_STARTPROC
1: 1:
LOCK_PREFIX LOCK_PREFIX
cmpxchg8b (%esi) cmpxchg8b (%esi)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8) ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc .macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx pushl %esi
pushl_cfi_reg esi pushl %edi
pushl_cfi_reg edi
movl %eax, %esi movl %eax, %esi
movl %edx, %edi movl %edx, %edi
...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg edi popl %edi
popl_cfi_reg esi popl %esi
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ebp popl %ebp
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -93,8 +81,7 @@ addsub_return sub sub sbb ...@@ -93,8 +81,7 @@ addsub_return sub sub sbb
.macro incdec_return func ins insc .macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -119,8 +105,7 @@ incdec_return inc add adc ...@@ -119,8 +105,7 @@ incdec_return inc add adc
incdec_return dec sub sbb incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8) ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8) ...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
2: 2:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8) ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8) ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx
/* push these two parameters on the stack for the comparison below */ /* push these two parameters on the stack for the comparison below */
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg ecx pushl %ecx
movl %eax, %ebp movl %eax, %ebp
movl %edx, %edi movl %edx, %edi
...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8) ...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
movl $1, %eax movl $1, %eax
3: 3:
addl $8, %esp addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8 popl %ebx
popl_cfi_reg ebx popl %ebp
popl_cfi_reg ebp
ret ret
4: 4:
cmpl %edx, 4(%esp) cmpl %edx, 4(%esp)
jne 2b jne 2b
xorl %eax, %eax xorl %eax, %eax
jmp 3b jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_add_unless_cx8) ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8) ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8) ...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax movl $1, %eax
3: 3:
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8) ENDPROC(atomic64_inc_not_zero_cx8)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) ...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* alignment for the unrolled loop. * alignment for the unrolled loop.
*/ */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff movl 12(%esp),%esi # Function arg: unsigned char *buff
...@@ -129,10 +127,9 @@ ENTRY(csum_partial) ...@@ -129,10 +127,9 @@ ENTRY(csum_partial)
jz 8f jz 8f
roll $8, %eax roll $8, %eax
8: 8:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#else #else
...@@ -140,9 +137,8 @@ ENDPROC(csum_partial) ...@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
/* Version for PentiumII/PPro */ /* Version for PentiumII/PPro */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf movl 12(%esp),%esi # Function arg: const unsigned char *buf
...@@ -249,10 +245,9 @@ ENTRY(csum_partial) ...@@ -249,10 +245,9 @@ ENTRY(csum_partial)
jz 90f jz 90f
roll $8, %eax roll $8, %eax
90: 90:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#endif #endif
...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, ...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define FP 12 #define FP 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
subl $4,%esp subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4 pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src movl ARGBASE+4(%esp),%esi # src
...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) ) ...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
.previous .previous
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi %ecx # equivalent to addl $4,%esp popl %ecx # equivalent to addl $4,%esp
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#else #else
...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic) ...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12 #define ARGBASE 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi
movl ARGBASE+4(%esp),%esi #src movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len movl ARGBASE+12(%esp),%ecx #len
...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) ) ...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
jmp 7b jmp 7b
.previous .previous
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#undef ROUND #undef ROUND
......
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -15,7 +14,6 @@ ...@@ -15,7 +14,6 @@
* %rdi - page * %rdi - page
*/ */
ENTRY(clear_page) ENTRY(clear_page)
CFI_STARTPROC
ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
"jmp clear_page_c_e", X86_FEATURE_ERMS "jmp clear_page_c_e", X86_FEATURE_ERMS
...@@ -24,11 +22,9 @@ ENTRY(clear_page) ...@@ -24,11 +22,9 @@ ENTRY(clear_page)
xorl %eax,%eax xorl %eax,%eax
rep stosq rep stosq
ret ret
CFI_ENDPROC
ENDPROC(clear_page) ENDPROC(clear_page)
ENTRY(clear_page_orig) ENTRY(clear_page_orig)
CFI_STARTPROC
xorl %eax,%eax xorl %eax,%eax
movl $4096/64,%ecx movl $4096/64,%ecx
...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig) ...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
jnz .Lloop jnz .Lloop
nop nop
ret ret
CFI_ENDPROC
ENDPROC(clear_page_orig) ENDPROC(clear_page_orig)
ENTRY(clear_page_c_e) ENTRY(clear_page_c_e)
CFI_STARTPROC
movl $4096,%ecx movl $4096,%ecx
xorl %eax,%eax xorl %eax,%eax
rep stosb rep stosb
ret ret
CFI_ENDPROC
ENDPROC(clear_page_c_e) ENDPROC(clear_page_c_e)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* *
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/percpu.h> #include <asm/percpu.h>
.text .text
...@@ -21,7 +20,6 @@ ...@@ -21,7 +20,6 @@
* %al : Operation successful * %al : Operation successful
*/ */
ENTRY(this_cpu_cmpxchg16b_emu) ENTRY(this_cpu_cmpxchg16b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
...@@ -32,7 +30,7 @@ CFI_STARTPROC ...@@ -32,7 +30,7 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of # *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros). # macros).
# #
pushfq_cfi pushfq
cli cli
cmpq PER_CPU_VAR((%rsi)), %rax cmpq PER_CPU_VAR((%rsi)), %rax
...@@ -43,17 +41,13 @@ CFI_STARTPROC ...@@ -43,17 +41,13 @@ CFI_STARTPROC
movq %rbx, PER_CPU_VAR((%rsi)) movq %rbx, PER_CPU_VAR((%rsi))
movq %rcx, PER_CPU_VAR(8(%rsi)) movq %rcx, PER_CPU_VAR(8(%rsi))
CFI_REMEMBER_STATE popfq
popfq_cfi
mov $1, %al mov $1, %al
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
popfq_cfi popfq
xor %al,%al xor %al,%al
ret ret
CFI_ENDPROC
ENDPROC(this_cpu_cmpxchg16b_emu) ENDPROC(this_cpu_cmpxchg16b_emu)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
.text .text
...@@ -20,14 +19,13 @@ ...@@ -20,14 +19,13 @@
* %ecx : high 32 bits of new value * %ecx : high 32 bits of new value
*/ */
ENTRY(cmpxchg8b_emu) ENTRY(cmpxchg8b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg8b (%esi)' on UP except we don't # Emulate 'cmpxchg8b (%esi)' on UP except we don't
# set the whole ZF thing (caller will just compare # set the whole ZF thing (caller will just compare
# eax:edx with the expected value) # eax:edx with the expected value)
# #
pushfl_cfi pushfl
cli cli
cmpl (%esi), %eax cmpl (%esi), %eax
...@@ -38,18 +36,15 @@ CFI_STARTPROC ...@@ -38,18 +36,15 @@ CFI_STARTPROC
movl %ebx, (%esi) movl %ebx, (%esi)
movl %ecx, 4(%esi) movl %ecx, 4(%esi)
CFI_REMEMBER_STATE popfl
popfl_cfi
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
movl (%esi), %eax movl (%esi), %eax
.Lhalf_same: .Lhalf_same:
movl 4(%esi), %edx movl 4(%esi), %edx
popfl_cfi popfl
ret ret
CFI_ENDPROC
ENDPROC(cmpxchg8b_emu) ENDPROC(cmpxchg8b_emu)
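
Spelled out, the contract the emulation implements (a pseudo-code sketch of the routine above, not text from the patch):

/*
 *	pushfl; cli			- pseudo-atomic: no irqs on this (UP) cpu
 *	if (%edx:%eax == *(%esi))
 *		*(%esi) = %ecx:%ebx	- store the new 64-bit value
 *	else
 *		%edx:%eax = *(%esi)	- hand back the current value
 *	popfl; ret
 *
 * The .Lhalf_same path is a small shortcut: when the low word already
 * matched, only the high word needs to be reloaded for the caller's
 * comparison.
 */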
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -13,22 +12,16 @@ ...@@ -13,22 +12,16 @@
*/ */
ALIGN ALIGN
ENTRY(copy_page) ENTRY(copy_page)
CFI_STARTPROC
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx movl $4096/8, %ecx
rep movsq rep movsq
ret ret
CFI_ENDPROC
ENDPROC(copy_page) ENDPROC(copy_page)
ENTRY(copy_page_regs) ENTRY(copy_page_regs)
CFI_STARTPROC
subq $2*8, %rsp subq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET 2*8
movq %rbx, (%rsp) movq %rbx, (%rsp)
CFI_REL_OFFSET rbx, 0
movq %r12, 1*8(%rsp) movq %r12, 1*8(%rsp)
CFI_REL_OFFSET r12, 1*8
movl $(4096/64)-5, %ecx movl $(4096/64)-5, %ecx
.p2align 4 .p2align 4
...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs) ...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
jnz .Loop2 jnz .Loop2
movq (%rsp), %rbx movq (%rsp), %rbx
CFI_RESTORE rbx
movq 1*8(%rsp), %r12 movq 1*8(%rsp), %r12
CFI_RESTORE r12
addq $2*8, %rsp addq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET -2*8
ret ret
CFI_ENDPROC
ENDPROC(copy_page_regs) ENDPROC(copy_page_regs)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -18,7 +17,6 @@ ...@@ -18,7 +17,6 @@
/* Standard copy_to_user with segment limit checking */ /* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user) ENTRY(_copy_to_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rdi,%rcx movq %rdi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user) ...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_to_user) ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */ /* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user) ENTRY(_copy_from_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rsi,%rcx movq %rsi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user) ...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_from_user) ENDPROC(_copy_from_user)
.section .fixup,"ax" .section .fixup,"ax"
/* must zero dest */ /* must zero dest */
ENTRY(bad_from_user) ENTRY(bad_from_user)
bad_from_user: bad_from_user:
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
xorl %eax,%eax xorl %eax,%eax
rep rep
...@@ -62,7 +56,6 @@ bad_from_user: ...@@ -62,7 +56,6 @@ bad_from_user:
bad_to_user: bad_to_user:
movl %edx,%eax movl %edx,%eax
ret ret
CFI_ENDPROC
ENDPROC(bad_from_user) ENDPROC(bad_from_user)
.previous .previous
...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user) ...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_unrolled) ENTRY(copy_user_generic_unrolled)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */ jb 20f /* less than 8 bytes, go to byte copy loop */
...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled) ...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled) ENDPROC(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions. /* Some CPUs run faster using the string copy instructions.
...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled) ...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_string) ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */ jb 2f /* less than 8 bytes, go to byte copy loop */
...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string) ...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b) _ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b) _ASM_EXTABLE(3b,12b)
CFI_ENDPROC
ENDPROC(copy_user_generic_string) ENDPROC(copy_user_generic_string)
/* /*
...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string) ...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_enhanced_fast_string) ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
movl %edx,%ecx movl %edx,%ecx
1: rep 1: rep
...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string) ...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
.previous .previous
_ASM_EXTABLE(1b,12b) _ASM_EXTABLE(1b,12b)
CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string) ENDPROC(copy_user_enhanced_fast_string)
/* /*
...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string) ...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
* This will force destination/source out of cache for more performance. * This will force destination/source out of cache for more performance.
*/ */
ENTRY(__copy_user_nocache) ENTRY(__copy_user_nocache)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */ jb 20f /* less than 8 bytes, go to byte copy loop */
...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache) ...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(__copy_user_nocache) ENDPROC(__copy_user_nocache)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* for more details. No warranty for anything given at all. * for more details. No warranty for anything given at all.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -47,23 +46,16 @@ ...@@ -47,23 +46,16 @@
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
cmpl $3*64, %edx cmpl $3*64, %edx
jle .Lignore jle .Lignore
.Lignore: .Lignore:
subq $7*8, %rsp subq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET 7*8
movq %rbx, 2*8(%rsp) movq %rbx, 2*8(%rsp)
CFI_REL_OFFSET rbx, 2*8
movq %r12, 3*8(%rsp) movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
movq %r14, 4*8(%rsp) movq %r14, 4*8(%rsp)
CFI_REL_OFFSET r14, 4*8
movq %r13, 5*8(%rsp) movq %r13, 5*8(%rsp)
CFI_REL_OFFSET r13, 5*8
movq %rbp, 6*8(%rsp) movq %rbp, 6*8(%rsp)
CFI_REL_OFFSET rbp, 6*8
movq %r8, (%rsp) movq %r8, (%rsp)
movq %r9, 1*8(%rsp) movq %r9, 1*8(%rsp)
...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic) ...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
addl %ebx, %eax addl %ebx, %eax
adcl %r9d, %eax /* carry */ adcl %r9d, %eax /* carry */
CFI_REMEMBER_STATE
.Lende: .Lende:
movq 2*8(%rsp), %rbx movq 2*8(%rsp), %rbx
CFI_RESTORE rbx
movq 3*8(%rsp), %r12 movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %r14 movq 4*8(%rsp), %r14
CFI_RESTORE r14
movq 5*8(%rsp), %r13 movq 5*8(%rsp), %r13
CFI_RESTORE r13
movq 6*8(%rsp), %rbp movq 6*8(%rsp), %rbp
CFI_RESTORE rbp
addq $7*8, %rsp addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
ret ret
CFI_RESTORE_STATE
/* Exception handlers. Very simple, zeroing is done in the wrappers */ /* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source: .Lbad_source:
...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic) ...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende jz .Lende
movl $-EFAULT, (%rax) movl $-EFAULT, (%rax)
jmp .Lende jmp .Lende
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/page_types.h> #include <asm/page_types.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -36,7 +35,6 @@ ...@@ -36,7 +35,6 @@
.text .text
ENTRY(__get_user_1) ENTRY(__get_user_1)
CFI_STARTPROC
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
...@@ -45,11 +43,9 @@ ENTRY(__get_user_1) ...@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_1) ENDPROC(__get_user_1)
ENTRY(__get_user_2) ENTRY(__get_user_2)
CFI_STARTPROC
add $1,%_ASM_AX add $1,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -60,11 +56,9 @@ ENTRY(__get_user_2) ...@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_2) ENDPROC(__get_user_2)
ENTRY(__get_user_4) ENTRY(__get_user_4)
CFI_STARTPROC
add $3,%_ASM_AX add $3,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -75,11 +69,9 @@ ENTRY(__get_user_4) ...@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_4) ENDPROC(__get_user_4)
ENTRY(__get_user_8) ENTRY(__get_user_8)
CFI_STARTPROC
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
add $7,%_ASM_AX add $7,%_ASM_AX
jc bad_get_user jc bad_get_user
...@@ -104,28 +96,23 @@ ENTRY(__get_user_8) ...@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
ASM_CLAC ASM_CLAC
ret ret
#endif #endif
CFI_ENDPROC
ENDPROC(__get_user_8) ENDPROC(__get_user_8)
bad_get_user: bad_get_user:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user) END(bad_get_user)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
bad_get_user_8: bad_get_user_8:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
xor %ecx,%ecx xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user_8) END(bad_get_user_8)
#endif #endif
......
...@@ -16,15 +16,12 @@ ...@@ -16,15 +16,12 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* override generic version in lib/iomap_copy.c * override generic version in lib/iomap_copy.c
*/ */
ENTRY(__iowrite32_copy) ENTRY(__iowrite32_copy)
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
rep movsd rep movsd
ret ret
CFI_ENDPROC
ENDPROC(__iowrite32_copy) ENDPROC(__iowrite32_copy)
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms) ...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
ENDPROC(memcpy_erms) ENDPROC(memcpy_erms)
ENTRY(memcpy_orig) ENTRY(memcpy_orig)
CFI_STARTPROC
movq %rdi, %rax movq %rdi, %rax
cmpq $0x20, %rdx cmpq $0x20, %rdx
...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig) ...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
.Lend: .Lend:
retq retq
CFI_ENDPROC
ENDPROC(memcpy_orig) ENDPROC(memcpy_orig)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -27,7 +26,6 @@ ...@@ -27,7 +26,6 @@
ENTRY(memmove) ENTRY(memmove)
ENTRY(__memmove) ENTRY(__memmove)
CFI_STARTPROC
/* Handle 32 bytes or more in a loop */ /* Handle 32 bytes or more in a loop */
mov %rdi, %rax mov %rdi, %rax
...@@ -207,6 +205,5 @@ ENTRY(__memmove) ...@@ -207,6 +205,5 @@ ENTRY(__memmove)
movb %r11b, (%rdi) movb %r11b, (%rdi)
13: 13:
retq retq
CFI_ENDPROC
ENDPROC(__memmove) ENDPROC(__memmove)
ENDPROC(memmove) ENDPROC(memmove)
/* Copyright 2002 Andi Kleen, SuSE Labs */ /* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -66,7 +65,6 @@ ENTRY(memset_erms) ...@@ -66,7 +65,6 @@ ENTRY(memset_erms)
ENDPROC(memset_erms) ENDPROC(memset_erms)
ENTRY(memset_orig) ENTRY(memset_orig)
CFI_STARTPROC
movq %rdi,%r10 movq %rdi,%r10
/* expand byte value */ /* expand byte value */
...@@ -78,7 +76,6 @@ ENTRY(memset_orig) ...@@ -78,7 +76,6 @@ ENTRY(memset_orig)
movl %edi,%r9d movl %edi,%r9d
andl $7,%r9d andl $7,%r9d
jnz .Lbad_alignment jnz .Lbad_alignment
CFI_REMEMBER_STATE
.Lafter_bad_alignment: .Lafter_bad_alignment:
movq %rdx,%rcx movq %rdx,%rcx
...@@ -128,7 +125,6 @@ ENTRY(memset_orig) ...@@ -128,7 +125,6 @@ ENTRY(memset_orig)
movq %r10,%rax movq %r10,%rax
ret ret
CFI_RESTORE_STATE
.Lbad_alignment: .Lbad_alignment:
cmpq $7,%rdx cmpq $7,%rdx
jbe .Lhandle_7 jbe .Lhandle_7
...@@ -139,5 +135,4 @@ ENTRY(memset_orig) ...@@ -139,5 +135,4 @@ ENTRY(memset_orig)
subq %r8,%rdx subq %r8,%rdx
jmp .Lafter_bad_alignment jmp .Lafter_bad_alignment
.Lfinal: .Lfinal:
CFI_ENDPROC
ENDPROC(memset_orig) ENDPROC(memset_orig)
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,5 +1,4 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
@@ -13,9 +12,8 @@
  */
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushq_cfi_reg rbx
-	pushq_cfi_reg rbp
+	pushq %rbx
+	pushq %rbp
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
 	movl    20(%rdi), %ebp
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
-	CFI_REMEMBER_STATE
 1:	\op
 2:	movl    %eax, (%r10)
 	movl	%r11d, %eax	/* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
 	movl    %ebp, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq_cfi_reg rbp
-	popq_cfi_reg rbx
+	popq %rbp
+	popq %rbx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, %r11d
 	jmp     2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg ebp
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
-	pushl_cfi $0              /* Return value */
-	pushl_cfi %eax
+	pushl %ebx
+	pushl %ebp
+	pushl %esi
+	pushl %edi
+	pushl $0              /* Return value */
+	pushl %eax
 	movl    4(%eax), %ecx
 	movl    8(%eax), %edx
 	movl    12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
 	movl    24(%eax), %esi
 	movl    28(%eax), %edi
 	movl    (%eax), %eax
-	CFI_REMEMBER_STATE
 1:	\op
-2:	pushl_cfi %eax
+2:	pushl %eax
 	movl    4(%esp), %eax
-	popl_cfi (%eax)
+	popl (%eax)
 	addl    $4, %esp
-	CFI_ADJUST_CFA_OFFSET -4
 	movl    %ecx, 4(%eax)
 	movl    %edx, 8(%eax)
 	movl    %ebx, 12(%eax)
 	movl    %ebp, 20(%eax)
 	movl    %esi, 24(%eax)
 	movl    %edi, 28(%eax)
-	popl_cfi %eax
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebp
-	popl_cfi_reg ebx
+	popl %eax
+	popl %edi
+	popl %esi
+	popl %ebp
+	popl %ebx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, 4(%esp)
 	jmp     2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
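For reference, the pushq_cfi_reg/popq_cfi_reg macros replaced throughout this diff bundled each stack operation with its two dwarf annotations, so dropping the annotations leaves only the bare push/pop. A sketch of their definitions as the now-deleted <asm/dwarf2.h> had them (reconstructed, assuming an assembler with CFI support; otherwise the CFI_* lines were no-ops):

	.macro pushq_cfi_reg reg
	pushq %\reg
	CFI_ADJUST_CFA_OFFSET 8		/* CFA is one 8-byte slot further from %rsp */
	CFI_REL_OFFSET \reg, 0		/* \reg is saved at the new top of stack */
	.endm

	.macro popq_cfi_reg reg
	popq %\reg
	CFI_ADJUST_CFA_OFFSET -8
	CFI_RESTORE \reg		/* \reg lives in its register again */
	.endm

The pushl_cfi_reg/popl_cfi_reg variants did the same with 4-byte slots.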
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -11,7 +11,6 @@
  * return value.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
@@ -30,11 +29,9 @@
  * as they get called from within inline assembly.
  */
-#define ENTER	CFI_STARTPROC ; \
-		GET_THREAD_INFO(%_ASM_BX)
+#define ENTER	GET_THREAD_INFO(%_ASM_BX)
 #define EXIT	ASM_CLAC ;	\
-		ret ;		\
-		CFI_ENDPROC
+		ret
 
 .text
 ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 
 bad_put_user:
-	CFI_STARTPROC
 	movl	$-EFAULT,%eax
 	EXIT
 END(bad_put_user)
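With ENTER and EXIT shrunk to their functional core, the helpers reduce to plain fault-checked stores; roughly, for the one-byte case (body reconstructed from the surrounding file, shown only to illustrate the new macro expansion):

	ENTRY(__put_user_1)
		ENTER				/* GET_THREAD_INFO(%_ASM_BX) */
		cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
		jae bad_put_user
		ASM_STAC
	1:	movb %al,(%_ASM_CX)
		xor %eax,%eax
		EXIT				/* ASM_CLAC ; ret */
	ENDPROC(__put_user_1)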
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -15,7 +15,6 @@
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 #define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
 #define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
@@ -34,10 +33,10 @@
  */
 #define save_common_regs \
-	pushl_cfi_reg ecx
+	pushl %ecx
 
 #define restore_common_regs \
-	popl_cfi_reg ecx
+	popl %ecx
 
 /* Avoid uglifying the argument copying x86-64 needs to do. */
 .macro movq src, dst
@@ -64,50 +63,45 @@
  */
 #define save_common_regs \
-	pushq_cfi_reg rdi; \
-	pushq_cfi_reg rsi; \
-	pushq_cfi_reg rcx; \
-	pushq_cfi_reg r8;  \
-	pushq_cfi_reg r9;  \
-	pushq_cfi_reg r10; \
-	pushq_cfi_reg r11
+	pushq %rdi; \
+	pushq %rsi; \
+	pushq %rcx; \
+	pushq %r8;  \
+	pushq %r9;  \
+	pushq %r10; \
+	pushq %r11
 
 #define restore_common_regs \
-	popq_cfi_reg r11; \
-	popq_cfi_reg r10; \
-	popq_cfi_reg r9;  \
-	popq_cfi_reg r8;  \
-	popq_cfi_reg rcx; \
-	popq_cfi_reg rsi; \
-	popq_cfi_reg rdi
+	popq %r11; \
+	popq %r10; \
+	popq %r9;  \
+	popq %r8;  \
+	popq %rcx; \
+	popq %rsi; \
+	popq %rdi
 
 #endif
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_down_read_failed
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
-	CFI_STARTPROC
 	save_common_regs
 	movq %rax,%rdi
 	call rwsem_down_write_failed
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
-	CFI_STARTPROC
 	/* do nothing if still outstanding active readers */
 	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
 	jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
 	call rwsem_wake
 	restore_common_regs
 1:	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
 ENTRY(call_rwsem_downgrade_wake)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_downgrade_wake
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)
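The `__ASM_SIZE(push,) %__ASM_REG(dx)` form above is what keeps rwsem.S word-size agnostic: the macro's variadic tail used to splice in `_cfi_reg` (yielding pushl_cfi_reg/pushq_cfi_reg) and now splices in nothing. A sketch of the helpers as <asm/asm.h> defines them (quoted from memory, so treat the exact spelling as an assumption):

	/* __ASM_SEL(a,b) expands to a on 32-bit and to b on 64-bit */
	#define __ASM_SIZE(inst, ...)	__ASM_SEL(inst##l##__VA_ARGS__, \
						  inst##q##__VA_ARGS__)
	#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)

So `__ASM_SIZE(push,) %__ASM_REG(dx)` assembles as `pushl %edx` on 32-bit and `pushq %rdx` on 64-bit.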
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -6,16 +6,14 @@
  */
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 	/* put return address in eax (arg1) */
 	.macro THUNK name, func, put_ret_addr_in_eax=0
 	.globl \name
 \name:
-	CFI_STARTPROC
-	pushl_cfi_reg eax
-	pushl_cfi_reg ecx
-	pushl_cfi_reg edx
+	pushl %eax
+	pushl %ecx
+	pushl %edx
 
 	.if \put_ret_addr_in_eax
 	/* Place EIP in the arg1 */
@@ -23,11 +21,10 @@
 	.endif
 
 	call \func
-	popl_cfi_reg edx
-	popl_cfi_reg ecx
-	popl_cfi_reg eax
+	popl %edx
+	popl %ecx
+	popl %eax
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
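For context, the 32-bit THUNK macro above is instantiated further down the file, unchanged by this diff; the instantiations look roughly like this, with the trailing `1` requesting the interrupted caller's return address in %eax as arg1:

	#ifdef CONFIG_TRACE_IRQFLAGS
		THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
		THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
	#endif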
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -6,7 +6,6 @@
  * Subject to the GNU public license, v.2. No warranty of any kind.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm.h>
@@ -14,27 +13,25 @@
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
 	.globl \name
 \name:
-	CFI_STARTPROC
 	/* this one pushes 9 elems, the next one would be %rIP */
-	pushq_cfi_reg rdi
-	pushq_cfi_reg rsi
-	pushq_cfi_reg rdx
-	pushq_cfi_reg rcx
-	pushq_cfi_reg rax
-	pushq_cfi_reg r8
-	pushq_cfi_reg r9
-	pushq_cfi_reg r10
-	pushq_cfi_reg r11
+	pushq %rdi
+	pushq %rsi
+	pushq %rdx
+	pushq %rcx
+	pushq %rax
+	pushq %r8
+	pushq %r9
+	pushq %r10
+	pushq %r11
 
 	.if \put_ret_addr_in_rdi
 	/* 9*8(%rsp) is return addr on stack */
-	movq_cfi_restore 9*8, rdi
+	movq 9*8(%rsp), %rdi
 	.endif
 
 	call \func
 	jmp restore
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
@@ -57,19 +54,16 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
  || defined(CONFIG_PREEMPT)
-	CFI_STARTPROC
-	CFI_ADJUST_CFA_OFFSET 9*8
 restore:
-	popq_cfi_reg r11
-	popq_cfi_reg r10
-	popq_cfi_reg r9
-	popq_cfi_reg r8
-	popq_cfi_reg rax
-	popq_cfi_reg rcx
-	popq_cfi_reg rdx
-	popq_cfi_reg rsi
-	popq_cfi_reg rdi
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rax
+	popq %rcx
+	popq %rdx
+	popq %rsi
+	popq %rdi
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(restore)
 #endif
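The two-line deletion ahead of restore: is the subtle one. restore is reached by `jmp` from the THUNK bodies rather than by `call`, so no prologue of its own ever built the frame; the deleted annotations opened a fresh CFI region and asserted the frame the thunks had already pushed. A sketch of the stack at that point:

	/*
	 * Stack at "restore" (one 8-byte slot per line):
	 *
	 *	9*8(%rsp)	return address of the interposed call
	 *	8*8(%rsp)	saved %rdi	<- first pushq in THUNK
	 *	 ...
	 *	0*8(%rsp)	saved %r11	<- ninth pushq
	 *
	 * CFI_STARTPROC assumed the usual CFA of %rsp+8 at function
	 * entry; CFI_ADJUST_CFA_OFFSET 9*8 then added the nine pushes
	 * performed by the jmp'ing thunk.
	 */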
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -8,7 +8,6 @@
  * of the License.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * Calling convention :