Commit 269af9a1 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'x86-extable-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull exception table generation updates from Ingo Molnar:
 "The biggest change here is to allow the build-time sorting of the
  exception table, to speed up booting.  This is achieved by the
  architecture enabling BUILDTIME_EXTABLE_SORT.  This option is enabled
  for x86 and MIPS currently.

  On x86 a number of fixes and changes were needed to allow build-time
  sorting of the exception table, in particular a relocation invariant
  exception table format was needed.  This required the abstracting out
  of exception table protocol and the removal of 20 years of accumulated
  assumptions about the x86 exception table format.

  While at it, this tree also cleans up various other aspects of
  exception handling, such as early(er) exception handling for
  rdmsr_safe() et al.

  All in one, as the result of these changes the x86 exception code is
  now pretty nice and modern.  As an added bonus any regressions in this
  code will be early and violent crashes, so if you see any of those,
  you'll know whom to blame!"

Fix up trivial conflicts in arch/{mips,x86}/Kconfig files due to nearby
modifications of other core architecture options.

* 'x86-extable-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  Revert "x86, extable: Disable presorted exception table for now"
  scripts/sortextable: Handle relative entries, and other cleanups
  x86, extable: Switch to relative exception table entries
  x86, extable: Disable presorted exception table for now
  x86, extable: Add _ASM_EXTABLE_EX() macro
  x86, extable: Remove open-coded exception table entries in arch/x86/ia32/ia32entry.S
  x86, extable: Remove open-coded exception table entries in arch/x86/include/asm/xsave.h
  x86, extable: Remove open-coded exception table entries in arch/x86/include/asm/kvm_host.h
  x86, extable: Remove the now-unused __ASM_EX_SEC macros
  x86, extable: Remove open-coded exception table entries in arch/x86/xen/xen-asm_32.S
  x86, extable: Remove open-coded exception table entries in arch/x86/um/checksum_32.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/usercopy_32.c
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/putuser.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/getuser.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/csum-copy_64.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/copy_user_nocache_64.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/copy_user_64.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/checksum_32.S
  x86, extable: Remove open-coded exception table entries in arch/x86/kernel/test_rodata.c
  x86, extable: Remove open-coded exception table entries in arch/x86/kernel/entry_64.S
  ...
parents 8ca038dc 8b5ad472
...@@ -796,6 +796,10 @@ quiet_cmd_vmlinux_version = GEN .version ...@@ -796,6 +796,10 @@ quiet_cmd_vmlinux_version = GEN .version
quiet_cmd_sysmap = SYSMAP quiet_cmd_sysmap = SYSMAP
cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
# Sort exception table at build time
quiet_cmd_sortextable = SORTEX
cmd_sortextable = $(objtree)/scripts/sortextable
# Link of vmlinux # Link of vmlinux
# If CONFIG_KALLSYMS is set .version is already updated # If CONFIG_KALLSYMS is set .version is already updated
# Generate System.map and verify that the content is consistent # Generate System.map and verify that the content is consistent
...@@ -808,6 +812,12 @@ define rule_vmlinux__ ...@@ -808,6 +812,12 @@ define rule_vmlinux__
$(call cmd,vmlinux__) $(call cmd,vmlinux__)
$(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
$(if $(CONFIG_BUILDTIME_EXTABLE_SORT), \
$(Q)$(if $($(quiet)cmd_sortextable), \
echo ' $($(quiet)cmd_sortextable) vmlinux' &&) \
$(cmd_sortextable) vmlinux)
$(Q)$(if $($(quiet)cmd_sysmap), \ $(Q)$(if $($(quiet)cmd_sysmap), \
echo ' $($(quiet)cmd_sysmap) System.map' &&) \ echo ' $($(quiet)cmd_sysmap) System.map' &&) \
$(cmd_sysmap) $@ System.map; \ $(cmd_sysmap) $@ System.map; \
......
...@@ -30,6 +30,7 @@ config MIPS ...@@ -30,6 +30,7 @@ config MIPS
select HAVE_MEMBLOCK_NODE_MAP select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD select GENERIC_SMP_IDLE_THREAD
select BUILDTIME_EXTABLE_SORT
menu "Machine selection" menu "Machine selection"
......
...@@ -83,6 +83,7 @@ config X86 ...@@ -83,6 +83,7 @@ config X86
select DCACHE_WORD_ACCESS select DCACHE_WORD_ACCESS
select GENERIC_SMP_IDLE_THREAD select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
select BUILDTIME_EXTABLE_SORT
config INSTRUCTION_DECODER config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS) def_bool (KPROBES || PERF_EVENTS)
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/irqflags.h> #include <asm/irqflags.h>
#include <asm/asm.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/err.h> #include <linux/err.h>
...@@ -146,9 +147,7 @@ ENTRY(ia32_sysenter_target) ...@@ -146,9 +147,7 @@ ENTRY(ia32_sysenter_target)
/* no need to do an access_ok check here because rbp has been /* no need to do an access_ok check here because rbp has been
32bit zero extended */ 32bit zero extended */
1: movl (%rbp),%ebp 1: movl (%rbp),%ebp
.section __ex_table,"a" _ASM_EXTABLE(1b,ia32_badarg)
.quad 1b,ia32_badarg
.previous
orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE CFI_REMEMBER_STATE
...@@ -303,9 +302,7 @@ ENTRY(ia32_cstar_target) ...@@ -303,9 +302,7 @@ ENTRY(ia32_cstar_target)
32bit zero extended */ 32bit zero extended */
/* hardware stack frame is complete now */ /* hardware stack frame is complete now */
1: movl (%r8),%r9d 1: movl (%r8),%r9d
.section __ex_table,"a" _ASM_EXTABLE(1b,ia32_badarg)
.quad 1b,ia32_badarg
.previous
orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE CFI_REMEMBER_STATE
......
...@@ -4,11 +4,9 @@ ...@@ -4,11 +4,9 @@
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
# define __ASM_FORM(x) x # define __ASM_FORM(x) x
# define __ASM_FORM_COMMA(x) x, # define __ASM_FORM_COMMA(x) x,
# define __ASM_EX_SEC .section __ex_table, "a"
#else #else
# define __ASM_FORM(x) " " #x " " # define __ASM_FORM(x) " " #x " "
# define __ASM_FORM_COMMA(x) " " #x "," # define __ASM_FORM_COMMA(x) " " #x ","
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif #endif
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
...@@ -42,17 +40,33 @@ ...@@ -42,17 +40,33 @@
/* Exception table entry */ /* Exception table entry */
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \ # define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC ; \ .pushsection "__ex_table","a" ; \
_ASM_ALIGN ; \ .balign 8 ; \
_ASM_PTR from , to ; \ .long (from) - . ; \
.previous .long (to) - . ; \
.popsection
# define _ASM_EXTABLE_EX(from,to) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . + 0x7ffffff0 ; \
.popsection
#else #else
# define _ASM_EXTABLE(from,to) \ # define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC \ " .pushsection \"__ex_table\",\"a\"\n" \
_ASM_ALIGN "\n" \ " .balign 8\n" \
_ASM_PTR #from "," #to "\n" \ " .long (" #from ") - .\n" \
" .previous\n" " .long (" #to ") - .\n" \
" .popsection\n"
# define _ASM_EXTABLE_EX(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
#endif #endif
#endif /* _ASM_X86_ASM_H */ #endif /* _ASM_X86_ASM_H */
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/msr-index.h> #include <asm/msr-index.h>
#include <asm/asm.h>
#define KVM_MAX_VCPUS 254 #define KVM_MAX_VCPUS 254
#define KVM_SOFT_MAX_VCPUS 160 #define KVM_SOFT_MAX_VCPUS 160
...@@ -921,9 +922,7 @@ extern bool kvm_rebooting; ...@@ -921,9 +922,7 @@ extern bool kvm_rebooting;
__ASM_SIZE(push) " $666b \n\t" \ __ASM_SIZE(push) " $666b \n\t" \
"call kvm_spurious_fault \n\t" \ "call kvm_spurious_fault \n\t" \
".popsection \n\t" \ ".popsection \n\t" \
".pushsection __ex_table, \"a\" \n\t" \ _ASM_EXTABLE(666b, 667b)
_ASM_PTR " 666b, 667b \n\t" \
".popsection"
#define __kvm_handle_fault_on_reboot(insn) \ #define __kvm_handle_fault_on_reboot(insn) \
____kvm_handle_fault_on_reboot(insn, "") ____kvm_handle_fault_on_reboot(insn, "")
......
...@@ -169,14 +169,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) ...@@ -169,14 +169,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
return native_write_msr_safe(msr, low, high); return native_write_msr_safe(msr, low, high);
} }
/* /* rdmsr with exception handling */
* rdmsr with exception handling.
*
* Please note that the exception handling works only after we've
* switched to the "smart" #GP handler in trap_init() which knows about
* exception tables - using this macro earlier than that causes machine
* hangs on boxes which do not implement the @msr in the first argument.
*/
#define rdmsr_safe(msr, p1, p2) \ #define rdmsr_safe(msr, p1, p2) \
({ \ ({ \
int __err; \ int __err; \
......
...@@ -87,7 +87,11 @@ ...@@ -87,7 +87,11 @@
#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 #define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
#define P6_NOP5_ATOMIC P6_NOP5 #define P6_NOP5_ATOMIC P6_NOP5
#ifdef __ASSEMBLY__
#define _ASM_MK_NOP(x) .byte x
#else
#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" #define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
#endif
#if defined(CONFIG_MK7) #if defined(CONFIG_MK7)
#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) #define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
......
...@@ -1023,10 +1023,8 @@ extern void default_banner(void); ...@@ -1023,10 +1023,8 @@ extern void default_banner(void);
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
) )
#define GET_CR2_INTO_RCX \ #define GET_CR2_INTO_RAX \
call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
movq %rax, %rcx; \
xorq %rax, %rax;
#define PARAVIRT_ADJUST_EXCEPTION_FRAME \ #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
......
...@@ -205,13 +205,15 @@ ...@@ -205,13 +205,15 @@
#define IDT_ENTRIES 256 #define IDT_ENTRIES 256
#define NUM_EXCEPTION_VECTORS 32 #define NUM_EXCEPTION_VECTORS 32
/* Bitmask of exception vectors which push an error code on the stack */
#define EXCEPTION_ERRCODE_MASK 0x00027d00
#define GDT_SIZE (GDT_ENTRIES * 8) #define GDT_SIZE (GDT_ENTRIES * 8)
#define GDT_ENTRY_TLS_ENTRIES 3 #define GDT_ENTRY_TLS_ENTRIES 3
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
/* /*
* Load a segment. Fall back on loading the zero * Load a segment. Fall back on loading the zero
......
...@@ -79,11 +79,12 @@ ...@@ -79,11 +79,12 @@
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
/* /*
* The exception table consists of pairs of addresses: the first is the * The exception table consists of pairs of addresses relative to the
* address of an instruction that is allowed to fault, and the second is * exception table entry itself: the first is the address of an
* the address at which the program should continue. No registers are * instruction that is allowed to fault, and the second is the address
* modified, so it is entirely up to the continuation code to figure out * at which the program should continue. No registers are modified,
* what to do. * so it is entirely up to the continuation code to figure out what to
* do.
* *
* All the routines below use bits of fixup code that are out of line * All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well, * with the main instruction path. This means when everything is well,
...@@ -92,10 +93,14 @@ ...@@ -92,10 +93,14 @@
*/ */
struct exception_table_entry { struct exception_table_entry {
unsigned long insn, fixup; int insn, fixup;
}; };
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
extern int fixup_exception(struct pt_regs *regs); extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -202,8 +207,8 @@ extern int __get_user_bad(void); ...@@ -202,8 +207,8 @@ extern int __get_user_bad(void);
asm volatile("1: movl %%eax,0(%1)\n" \ asm volatile("1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \ "2: movl %%edx,4(%1)\n" \
"3:\n" \ "3:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \ _ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE(2b, 3b - 2b) \ _ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr)) : : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \ #define __put_user_x8(x, ptr, __ret_pu) \
...@@ -408,7 +413,7 @@ do { \ ...@@ -408,7 +413,7 @@ do { \
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \ asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \ "2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \ _ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr))) : ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \ #define __put_user_nocheck(x, ptr, size) \
...@@ -450,7 +455,7 @@ struct __large_struct { unsigned long buf[100]; }; ...@@ -450,7 +455,7 @@ struct __large_struct { unsigned long buf[100]; };
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \ asm volatile("1: mov"itype" %"rtype"0,%1\n" \
"2:\n" \ "2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \ _ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr))) : : ltype(x), "m" (__m(addr)))
/* /*
......
...@@ -80,10 +80,7 @@ static inline int xsave_user(struct xsave_struct __user *buf) ...@@ -80,10 +80,7 @@ static inline int xsave_user(struct xsave_struct __user *buf)
"3: movl $-1,%[err]\n" "3: movl $-1,%[err]\n"
" jmp 2b\n" " jmp 2b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(1b,3b)
_ASM_ALIGN "\n"
_ASM_PTR "1b,3b\n"
".previous"
: [err] "=r" (err) : [err] "=r" (err)
: "D" (buf), "a" (-1), "d" (-1), "0" (0) : "D" (buf), "a" (-1), "d" (-1), "0" (0)
: "memory"); : "memory");
...@@ -106,10 +103,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) ...@@ -106,10 +103,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
"3: movl $-1,%[err]\n" "3: movl $-1,%[err]\n"
" jmp 2b\n" " jmp 2b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(1b,3b)
_ASM_ALIGN "\n"
_ASM_PTR "1b,3b\n"
".previous"
: [err] "=r" (err) : [err] "=r" (err)
: "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
: "memory"); /* memory required? */ : "memory"); /* memory required? */
......
...@@ -56,6 +56,7 @@ ...@@ -56,6 +56,7 @@
#include <asm/irq_vectors.h> #include <asm/irq_vectors.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/asm.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h> #include <linux/elf-em.h>
...@@ -151,10 +152,8 @@ ...@@ -151,10 +152,8 @@
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
99: movl $0, (%esp) 99: movl $0, (%esp)
jmp 98b jmp 98b
.section __ex_table, "a"
.align 4
.long 98b, 99b
.popsection .popsection
_ASM_EXTABLE(98b,99b)
.endm .endm
.macro PTGS_TO_GS .macro PTGS_TO_GS
...@@ -164,10 +163,8 @@ ...@@ -164,10 +163,8 @@
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
99: movl $0, PT_GS(%esp) 99: movl $0, PT_GS(%esp)
jmp 98b jmp 98b
.section __ex_table, "a"
.align 4
.long 98b, 99b
.popsection .popsection
_ASM_EXTABLE(98b,99b)
.endm .endm
.macro GS_TO_REG reg .macro GS_TO_REG reg
...@@ -249,12 +246,10 @@ ...@@ -249,12 +246,10 @@
jmp 2b jmp 2b
6: movl $0, (%esp) 6: movl $0, (%esp)
jmp 3b jmp 3b
.section __ex_table, "a"
.align 4
.long 1b, 4b
.long 2b, 5b
.long 3b, 6b
.popsection .popsection
_ASM_EXTABLE(1b,4b)
_ASM_EXTABLE(2b,5b)
_ASM_EXTABLE(3b,6b)
POP_GS_EX POP_GS_EX
.endm .endm
...@@ -415,10 +410,7 @@ sysenter_past_esp: ...@@ -415,10 +410,7 @@ sysenter_past_esp:
jae syscall_fault jae syscall_fault
1: movl (%ebp),%ebp 1: movl (%ebp),%ebp
movl %ebp,PT_EBP(%esp) movl %ebp,PT_EBP(%esp)
.section __ex_table,"a" _ASM_EXTABLE(1b,syscall_fault)
.align 4
.long 1b,syscall_fault
.previous
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
...@@ -485,10 +477,8 @@ sysexit_audit: ...@@ -485,10 +477,8 @@ sysexit_audit:
.pushsection .fixup,"ax" .pushsection .fixup,"ax"
2: movl $0,PT_FS(%esp) 2: movl $0,PT_FS(%esp)
jmp 1b jmp 1b
.section __ex_table,"a"
.align 4
.long 1b,2b
.popsection .popsection
_ASM_EXTABLE(1b,2b)
PTGS_TO_GS_EX PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target) ENDPROC(ia32_sysenter_target)
...@@ -543,10 +533,7 @@ ENTRY(iret_exc) ...@@ -543,10 +533,7 @@ ENTRY(iret_exc)
pushl $do_iret_error pushl $do_iret_error
jmp error_code jmp error_code
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(irq_return,iret_exc)
.align 4
.long irq_return,iret_exc
.previous
CFI_RESTORE_STATE CFI_RESTORE_STATE
ldt_ss: ldt_ss:
...@@ -901,10 +888,7 @@ END(device_not_available) ...@@ -901,10 +888,7 @@ END(device_not_available)
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
ENTRY(native_iret) ENTRY(native_iret)
iret iret
.section __ex_table,"a" _ASM_EXTABLE(native_iret, iret_exc)
.align 4
.long native_iret, iret_exc
.previous
END(native_iret) END(native_iret)
ENTRY(native_irq_enable_sysexit) ENTRY(native_irq_enable_sysexit)
...@@ -1093,13 +1077,10 @@ ENTRY(xen_failsafe_callback) ...@@ -1093,13 +1077,10 @@ ENTRY(xen_failsafe_callback)
movl %eax,16(%esp) movl %eax,16(%esp)
jmp 4b jmp 4b
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(1b,6b)
.align 4 _ASM_EXTABLE(2b,7b)
.long 1b,6b _ASM_EXTABLE(3b,8b)
.long 2b,7b _ASM_EXTABLE(4b,9b)
.long 3b,8b
.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback) ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
......
...@@ -55,6 +55,7 @@ ...@@ -55,6 +55,7 @@
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/ftrace.h> #include <asm/ftrace.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/asm.h>
#include <linux/err.h> #include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
...@@ -900,18 +901,12 @@ restore_args: ...@@ -900,18 +901,12 @@ restore_args:
irq_return: irq_return:
INTERRUPT_RETURN INTERRUPT_RETURN
_ASM_EXTABLE(irq_return, bad_iret)
.section __ex_table, "a"
.quad irq_return, bad_iret
.previous
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
ENTRY(native_iret) ENTRY(native_iret)
iretq iretq
_ASM_EXTABLE(native_iret, bad_iret)
.section __ex_table,"a"
.quad native_iret, bad_iret
.previous
#endif #endif
.section .fixup,"ax" .section .fixup,"ax"
...@@ -1181,10 +1176,7 @@ gs_change: ...@@ -1181,10 +1176,7 @@ gs_change:
CFI_ENDPROC CFI_ENDPROC
END(native_load_gs_index) END(native_load_gs_index)
.section __ex_table,"a" _ASM_EXTABLE(gs_change,bad_gs)
.align 8
.quad gs_change,bad_gs
.previous
.section .fixup,"ax" .section .fixup,"ax"
/* running with kernelgs */ /* running with kernelgs */
bad_gs: bad_gs:
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <asm/msr-index.h> #include <asm/msr-index.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/nops.h>
/* Physical address */ /* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET) #define pa(X) ((X) - __PAGE_OFFSET)
...@@ -363,28 +364,23 @@ default_entry: ...@@ -363,28 +364,23 @@ default_entry:
pushl $0 pushl $0
popfl popfl
#ifdef CONFIG_SMP
cmpb $0, ready
jnz checkCPUtype
#endif /* CONFIG_SMP */
/* /*
* start system 32-bit setup. We need to re-do some of the things done * start system 32-bit setup. We need to re-do some of the things done
* in 16-bit mode for the "real" operations. * in 16-bit mode for the "real" operations.
*/ */
call setup_idt movl setup_once_ref,%eax
andl %eax,%eax
checkCPUtype: jz 1f # Did we do this already?
call *%eax
movl $-1,X86_CPUID # -1 for no CPUID initially 1:
/* check if it is 486 or 386. */ /* check if it is 486 or 386. */
/* /*
* XXX - this does a lot of unnecessary setup. Alignment checks don't * XXX - this does a lot of unnecessary setup. Alignment checks don't
* apply at our cpl of 0 and the stack ought to be aligned already, and * apply at our cpl of 0 and the stack ought to be aligned already, and
* we don't need to preserve eflags. * we don't need to preserve eflags.
*/ */
movl $-1,X86_CPUID # -1 for no CPUID initially
movb $3,X86 # at least 386 movb $3,X86 # at least 386
pushfl # push EFLAGS pushfl # push EFLAGS
popl %eax # get EFLAGS popl %eax # get EFLAGS
...@@ -450,21 +446,6 @@ is386: movl $2,%ecx # set MP ...@@ -450,21 +446,6 @@ is386: movl $2,%ecx # set MP
movl $(__KERNEL_PERCPU), %eax movl $(__KERNEL_PERCPU), %eax
movl %eax,%fs # set this cpu's percpu movl %eax,%fs # set this cpu's percpu
#ifdef CONFIG_CC_STACKPROTECTOR
/*
* The linker can't handle this by relocation. Manually set
* base address in stack canary segment descriptor.
*/
cmpb $0,ready
jne 1f
movl $gdt_page,%eax
movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
1:
#endif
movl $(__KERNEL_STACK_CANARY),%eax movl $(__KERNEL_STACK_CANARY),%eax
movl %eax,%gs movl %eax,%gs
...@@ -473,7 +454,6 @@ is386: movl $2,%ecx # set MP ...@@ -473,7 +454,6 @@ is386: movl $2,%ecx # set MP
cld # gcc2 wants the direction flag cleared at all times cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder pushl $0 # fake return address for unwinder
movb $1, ready
jmp *(initial_code) jmp *(initial_code)
/* /*
...@@ -495,81 +475,122 @@ check_x87: ...@@ -495,81 +475,122 @@ check_x87:
.byte 0xDB,0xE4 /* fsetpm for 287, ignored by 387 */ .byte 0xDB,0xE4 /* fsetpm for 287, ignored by 387 */
ret ret
#include "verify_cpu.S"
/* /*
* setup_idt * setup_once
* *
* sets up a idt with 256 entries pointing to * The setup work we only want to run on the BSP.
* ignore_int, interrupt gates. It doesn't actually load
* idt - that can be done only after paging has been enabled
* and the kernel moved to PAGE_OFFSET. Interrupts
* are enabled elsewhere, when we can be relatively
* sure everything is ok.
* *
* Warning: %esi is live across this function. * Warning: %esi is live across this function.
*/ */
setup_idt: __INIT
lea ignore_int,%edx setup_once:
movl $(__KERNEL_CS << 16),%eax /*
movw %dx,%ax /* selector = 0x0010 = cs */ * Set up a idt with 256 entries pointing to ignore_int,
movw $0x8E00,%dx /* interrupt gate - dpl=0, present */ * interrupt gates. It doesn't actually load idt - that needs
* to be done on each CPU. Interrupts are enabled elsewhere,
* when we can be relatively sure everything is ok.
*/
lea idt_table,%edi movl $idt_table,%edi
mov $256,%ecx movl $early_idt_handlers,%eax
rp_sidt: movl $NUM_EXCEPTION_VECTORS,%ecx
1:
movl %eax,(%edi) movl %eax,(%edi)
movl %edx,4(%edi) movl %eax,4(%edi)
/* interrupt gate, dpl=0, present */
movl $(0x8E000000 + __KERNEL_CS),2(%edi)
addl $9,%eax
addl $8,%edi addl $8,%edi
dec %ecx loop 1b
jne rp_sidt
.macro set_early_handler handler,trapno movl $256 - NUM_EXCEPTION_VECTORS,%ecx
lea \handler,%edx movl $ignore_int,%edx
movl $(__KERNEL_CS << 16),%eax movl $(__KERNEL_CS << 16),%eax
movw %dx,%ax movw %dx,%ax /* selector = 0x0010 = cs */
movw $0x8E00,%dx /* interrupt gate - dpl=0, present */ movw $0x8E00,%dx /* interrupt gate - dpl=0, present */
lea idt_table,%edi 2:
movl %eax,8*\trapno(%edi) movl %eax,(%edi)
movl %edx,8*\trapno+4(%edi) movl %edx,4(%edi)
.endm addl $8,%edi
loop 2b
set_early_handler handler=early_divide_err,trapno=0 #ifdef CONFIG_CC_STACKPROTECTOR
set_early_handler handler=early_illegal_opcode,trapno=6 /*
set_early_handler handler=early_protection_fault,trapno=13 * Configure the stack canary. The linker can't handle this by
set_early_handler handler=early_page_fault,trapno=14 * relocation. Manually set base address in stack canary
* segment descriptor.
*/
movl $gdt_page,%eax
movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
#endif
andl $0,setup_once_ref /* Once is enough, thanks */
ret ret
early_divide_err: ENTRY(early_idt_handlers)
xor %edx,%edx # 36(%esp) %eflags
pushl $0 /* fake errcode */ # 32(%esp) %cs
jmp early_fault # 28(%esp) %eip
# 24(%rsp) error code
i = 0
.rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushl $0 # Dummy error code, to make stack frame uniform
.endif
pushl $i # 20(%esp) Vector number
jmp early_idt_handler
i = i + 1
.endr
ENDPROC(early_idt_handlers)
/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
cld
cmpl $2,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
early_illegal_opcode: push %eax # 16(%esp)
movl $6,%edx push %ecx # 12(%esp)
pushl $0 /* fake errcode */ push %edx # 8(%esp)
jmp early_fault push %ds # 4(%esp)
push %es # 0(%esp)
movl $(__KERNEL_DS),%eax
movl %eax,%ds
movl %eax,%es
early_protection_fault: cmpl $(__KERNEL_CS),32(%esp)
movl $13,%edx jne 10f
jmp early_fault
early_page_fault: leal 28(%esp),%eax # Pointer to %eip
movl $14,%edx call early_fixup_exception
jmp early_fault andl %eax,%eax
jnz ex_entry /* found an exception entry */
early_fault: 10:
cld
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
pusha xorl %eax,%eax
movl $(__KERNEL_DS),%eax movw %ax,2(%esp) /* clean up the segment values on some cpus */
movl %eax,%ds movw %ax,6(%esp)
movl %eax,%es movw %ax,34(%esp)
cmpl $2,early_recursion_flag leal 40(%esp),%eax
je hlt_loop pushl %eax /* %esp before the exception */
incl early_recursion_flag pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
movl %cr2,%eax movl %cr2,%eax
pushl %eax pushl %eax
pushl %edx /* trapno */ pushl (20+6*4)(%esp) /* trapno */
pushl $fault_msg pushl $fault_msg
call printk call printk
#endif #endif
...@@ -578,6 +599,17 @@ hlt_loop: ...@@ -578,6 +599,17 @@ hlt_loop:
hlt hlt
jmp hlt_loop jmp hlt_loop
ex_entry:
pop %es
pop %ds
pop %edx
pop %ecx
pop %eax
addl $8,%esp /* drop vector number and error code */
decl %ss:early_recursion_flag
iret
ENDPROC(early_idt_handler)
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
ALIGN ALIGN
ignore_int: ignore_int:
...@@ -611,13 +643,18 @@ ignore_int: ...@@ -611,13 +643,18 @@ ignore_int:
popl %eax popl %eax
#endif #endif
iret iret
ENDPROC(ignore_int)
__INITDATA
.align 4
early_recursion_flag:
.long 0
#include "verify_cpu.S" __REFDATA
.align 4
__REFDATA
.align 4
ENTRY(initial_code) ENTRY(initial_code)
.long i386_start_kernel .long i386_start_kernel
ENTRY(setup_once_ref)
.long setup_once
/* /*
* BSS section * BSS section
...@@ -670,22 +707,19 @@ ENTRY(initial_page_table) ...@@ -670,22 +707,19 @@ ENTRY(initial_page_table)
ENTRY(stack_start) ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE .long init_thread_union+THREAD_SIZE
early_recursion_flag: __INITRODATA
.long 0
ready: .byte 0
int_msg: int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n" .asciz "Unknown interrupt or fault at: %p %p %p\n"
fault_msg: fault_msg:
/* fault info: */ /* fault info: */
.ascii "BUG: Int %d: CR2 %p\n" .ascii "BUG: Int %d: CR2 %p\n"
/* pusha regs: */ /* regs pushed in early_idt_handler: */
.ascii " EDI %p ESI %p EBP %p ESP %p\n" .ascii " EDI %p ESI %p EBP %p EBX %p\n"
.ascii " EBX %p EDX %p ECX %p EAX %p\n" .ascii " ESP %p ES %p DS %p\n"
.ascii " EDX %p ECX %p EAX %p\n"
/* fault frame: */ /* fault frame: */
.ascii " err %p EIP %p CS %p flg %p\n" .ascii " vec %p err %p EIP %p CS %p flg %p\n"
.ascii "Stack: %p %p %p %p %p %p %p %p\n" .ascii "Stack: %p %p %p %p %p %p %p %p\n"
.ascii " %p %p %p %p %p %p %p %p\n" .ascii " %p %p %p %p %p %p %p %p\n"
.asciz " %p %p %p %p %p %p %p %p\n" .asciz " %p %p %p %p %p %p %p %p\n"
...@@ -699,6 +733,7 @@ fault_msg: ...@@ -699,6 +733,7 @@ fault_msg:
* segment size, and 32-bit linear address value: * segment size, and 32-bit linear address value:
*/ */
.data
.globl boot_gdt_descr .globl boot_gdt_descr
.globl idt_descr .globl idt_descr
......
...@@ -19,12 +19,15 @@ ...@@ -19,12 +19,15 @@
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/nops.h>
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else #else
#define GET_CR2_INTO_RCX movq %cr2, %rcx #define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif #endif
/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
...@@ -270,36 +273,56 @@ bad_address: ...@@ -270,36 +273,56 @@ bad_address:
jmp bad_address jmp bad_address
.section ".init.text","ax" .section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
.globl early_idt_handlers .globl early_idt_handlers
early_idt_handlers: early_idt_handlers:
# 104(%rsp) %rflags
# 96(%rsp) %cs
# 88(%rsp) %rip
# 80(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
movl $i, %esi .if (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushq $0 # Dummy error code, to make stack frame uniform
.endif
pushq $i # 72(%rsp) Vector number
jmp early_idt_handler jmp early_idt_handler
i = i + 1 i = i + 1
.endr .endr
#endif
ENTRY(early_idt_handler) ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK cld
cmpl $2,early_recursion_flag(%rip) cmpl $2,early_recursion_flag(%rip)
jz 1f jz 1f
incl early_recursion_flag(%rip) incl early_recursion_flag(%rip)
GET_CR2_INTO_RCX
movq %rcx,%r9 pushq %rax # 64(%rsp)
xorl %r8d,%r8d # zero for error code pushq %rcx # 56(%rsp)
movl %esi,%ecx # get vector number pushq %rdx # 48(%rsp)
# Test %ecx against mask of vectors that push error code. pushq %rsi # 40(%rsp)
cmpl $31,%ecx pushq %rdi # 32(%rsp)
ja 0f pushq %r8 # 24(%rsp)
movl $1,%eax pushq %r9 # 16(%rsp)
salq %cl,%rax pushq %r10 # 8(%rsp)
testl $0x27d00,%eax pushq %r11 # 0(%rsp)
je 0f
popq %r8 # get error code cmpl $__KERNEL_CS,96(%rsp)
0: movq 0(%rsp),%rcx # get ip jne 10f
movq 8(%rsp),%rdx # get cs
leaq 88(%rsp),%rdi # Pointer to %rip
call early_fixup_exception
andl %eax,%eax
jnz 20f # Found an exception entry
10:
#ifdef CONFIG_EARLY_PRINTK
GET_CR2_INTO(%r9) # can clobber any volatile register if pv
movl 80(%rsp),%r8d # error code
movl 72(%rsp),%esi # vector number
movl 96(%rsp),%edx # %cs
movq 88(%rsp),%rcx # %rip
xorl %eax,%eax xorl %eax,%eax
leaq early_idt_msg(%rip),%rdi leaq early_idt_msg(%rip),%rdi
call early_printk call early_printk
...@@ -308,17 +331,32 @@ ENTRY(early_idt_handler) ...@@ -308,17 +331,32 @@ ENTRY(early_idt_handler)
call dump_stack call dump_stack
#ifdef CONFIG_KALLSYMS #ifdef CONFIG_KALLSYMS
leaq early_idt_ripmsg(%rip),%rdi leaq early_idt_ripmsg(%rip),%rdi
movq 0(%rsp),%rsi # get rip again movq 40(%rsp),%rsi # %rip again
call __print_symbol call __print_symbol
#endif #endif
#endif /* EARLY_PRINTK */ #endif /* EARLY_PRINTK */
1: hlt 1: hlt
jmp 1b jmp 1b
#ifdef CONFIG_EARLY_PRINTK 20: # Exception table entry found
popq %r11
popq %r10
popq %r9
popq %r8
popq %rdi
popq %rsi
popq %rdx
popq %rcx
popq %rax
addq $16,%rsp # drop vector number and error code
decl early_recursion_flag(%rip)
INTERRUPT_RETURN
.balign 4
early_recursion_flag: early_recursion_flag:
.long 0 .long 0
#ifdef CONFIG_EARLY_PRINTK
early_idt_msg: early_idt_msg:
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg: early_idt_ripmsg:
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/asm.h>
int rodata_test(void) int rodata_test(void)
{ {
...@@ -42,14 +43,7 @@ int rodata_test(void) ...@@ -42,14 +43,7 @@ int rodata_test(void)
".section .fixup,\"ax\"\n" ".section .fixup,\"ax\"\n"
"2: jmp 1b\n" "2: jmp 1b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(0b,2b)
" .align 16\n"
#ifdef CONFIG_X86_32
" .long 0b,2b\n"
#else
" .quad 0b,2b\n"
#endif
".previous"
: [rslt] "=r" (result) : [rslt] "=r" (result)
: [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL) : [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL)
); );
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h> #include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h>
/* /*
* computes a partial checksum, e.g. for TCP/UDP fragments * computes a partial checksum, e.g. for TCP/UDP fragments
...@@ -282,15 +283,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, ...@@ -282,15 +283,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define SRC(y...) \ #define SRC(y...) \
9999: y; \ 9999: y; \
.section __ex_table, "a"; \ _ASM_EXTABLE(9999b, 6001f)
.long 9999b, 6001f ; \
.previous
#define DST(y...) \ #define DST(y...) \
9999: y; \ 9999: y; \
.section __ex_table, "a"; \ _ASM_EXTABLE(9999b, 6002f)
.long 9999b, 6002f ; \
.previous
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/asm.h>
/* /*
* By placing feature2 after feature1 in altinstructions section, we logically * By placing feature2 after feature1 in altinstructions section, we logically
...@@ -63,11 +64,8 @@ ...@@ -63,11 +64,8 @@
jmp copy_user_handle_tail jmp copy_user_handle_tail
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(100b,103b)
.align 8 _ASM_EXTABLE(101b,103b)
.quad 100b,103b
.quad 101b,103b
.previous
#endif #endif
.endm .endm
...@@ -191,29 +189,26 @@ ENTRY(copy_user_generic_unrolled) ...@@ -191,29 +189,26 @@ ENTRY(copy_user_generic_unrolled)
60: jmp copy_user_handle_tail /* ecx is zerorest also */ 60: jmp copy_user_handle_tail /* ecx is zerorest also */
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(1b,30b)
.align 8 _ASM_EXTABLE(2b,30b)
.quad 1b,30b _ASM_EXTABLE(3b,30b)
.quad 2b,30b _ASM_EXTABLE(4b,30b)
.quad 3b,30b _ASM_EXTABLE(5b,30b)
.quad 4b,30b _ASM_EXTABLE(6b,30b)
.quad 5b,30b _ASM_EXTABLE(7b,30b)
.quad 6b,30b _ASM_EXTABLE(8b,30b)
.quad 7b,30b _ASM_EXTABLE(9b,30b)
.quad 8b,30b _ASM_EXTABLE(10b,30b)
.quad 9b,30b _ASM_EXTABLE(11b,30b)
.quad 10b,30b _ASM_EXTABLE(12b,30b)
.quad 11b,30b _ASM_EXTABLE(13b,30b)
.quad 12b,30b _ASM_EXTABLE(14b,30b)
.quad 13b,30b _ASM_EXTABLE(15b,30b)
.quad 14b,30b _ASM_EXTABLE(16b,30b)
.quad 15b,30b _ASM_EXTABLE(18b,40b)
.quad 16b,30b _ASM_EXTABLE(19b,40b)
.quad 18b,40b _ASM_EXTABLE(21b,50b)
.quad 19b,40b _ASM_EXTABLE(22b,50b)
.quad 21b,50b
.quad 22b,50b
.previous
CFI_ENDPROC CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled) ENDPROC(copy_user_generic_unrolled)
...@@ -259,11 +254,8 @@ ENTRY(copy_user_generic_string) ...@@ -259,11 +254,8 @@ ENTRY(copy_user_generic_string)
jmp copy_user_handle_tail jmp copy_user_handle_tail
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(1b,11b)
.align 8 _ASM_EXTABLE(3b,12b)
.quad 1b,11b
.quad 3b,12b
.previous
CFI_ENDPROC CFI_ENDPROC
ENDPROC(copy_user_generic_string) ENDPROC(copy_user_generic_string)
...@@ -294,9 +286,6 @@ ENTRY(copy_user_enhanced_fast_string) ...@@ -294,9 +286,6 @@ ENTRY(copy_user_enhanced_fast_string)
jmp copy_user_handle_tail jmp copy_user_handle_tail
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(1b,12b)
.align 8
.quad 1b,12b
.previous
CFI_ENDPROC CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string) ENDPROC(copy_user_enhanced_fast_string)
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/asm.h>
.macro ALIGN_DESTINATION .macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT #ifdef FIX_ALIGNMENT
...@@ -36,11 +37,8 @@ ...@@ -36,11 +37,8 @@
jmp copy_user_handle_tail jmp copy_user_handle_tail
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(100b,103b)
.align 8 _ASM_EXTABLE(101b,103b)
.quad 100b,103b
.quad 101b,103b
.previous
#endif #endif
.endm .endm
...@@ -111,27 +109,25 @@ ENTRY(__copy_user_nocache) ...@@ -111,27 +109,25 @@ ENTRY(__copy_user_nocache)
jmp copy_user_handle_tail jmp copy_user_handle_tail
.previous .previous
.section __ex_table,"a" _ASM_EXTABLE(1b,30b)
.quad 1b,30b _ASM_EXTABLE(2b,30b)
.quad 2b,30b _ASM_EXTABLE(3b,30b)
.quad 3b,30b _ASM_EXTABLE(4b,30b)
.quad 4b,30b _ASM_EXTABLE(5b,30b)
.quad 5b,30b _ASM_EXTABLE(6b,30b)
.quad 6b,30b _ASM_EXTABLE(7b,30b)
.quad 7b,30b _ASM_EXTABLE(8b,30b)
.quad 8b,30b _ASM_EXTABLE(9b,30b)
.quad 9b,30b _ASM_EXTABLE(10b,30b)
.quad 10b,30b _ASM_EXTABLE(11b,30b)
.quad 11b,30b _ASM_EXTABLE(12b,30b)
.quad 12b,30b _ASM_EXTABLE(13b,30b)
.quad 13b,30b _ASM_EXTABLE(14b,30b)
.quad 14b,30b _ASM_EXTABLE(15b,30b)
.quad 15b,30b _ASM_EXTABLE(16b,30b)
.quad 16b,30b _ASM_EXTABLE(18b,40b)
.quad 18b,40b _ASM_EXTABLE(19b,40b)
.quad 19b,40b _ASM_EXTABLE(21b,50b)
.quad 21b,50b _ASM_EXTABLE(22b,50b)
.quad 22b,50b
.previous
CFI_ENDPROC CFI_ENDPROC
ENDPROC(__copy_user_nocache) ENDPROC(__copy_user_nocache)
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h> #include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h>
/* /*
* Checksum copy with exception handling. * Checksum copy with exception handling.
...@@ -31,26 +32,17 @@ ...@@ -31,26 +32,17 @@
.macro source .macro source
10: 10:
.section __ex_table, "a" _ASM_EXTABLE(10b, .Lbad_source)
.align 8
.quad 10b, .Lbad_source
.previous
.endm .endm
.macro dest .macro dest
20: 20:
.section __ex_table, "a" _ASM_EXTABLE(20b, .Lbad_dest)
.align 8
.quad 20b, .Lbad_dest
.previous
.endm .endm
.macro ignore L=.Lignore .macro ignore L=.Lignore
30: 30:
.section __ex_table, "a" _ASM_EXTABLE(30b, \L)
.align 8
.quad 30b, \L
.previous
.endm .endm
......
...@@ -95,10 +95,9 @@ bad_get_user: ...@@ -95,10 +95,9 @@ bad_get_user:
CFI_ENDPROC CFI_ENDPROC
END(bad_get_user) END(bad_get_user)
.section __ex_table,"a" _ASM_EXTABLE(1b,bad_get_user)
_ASM_PTR 1b,bad_get_user _ASM_EXTABLE(2b,bad_get_user)
_ASM_PTR 2b,bad_get_user _ASM_EXTABLE(3b,bad_get_user)
_ASM_PTR 3b,bad_get_user
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
_ASM_PTR 4b,bad_get_user _ASM_EXTABLE(4b,bad_get_user)
#endif #endif
...@@ -86,12 +86,10 @@ bad_put_user: ...@@ -86,12 +86,10 @@ bad_put_user:
EXIT EXIT
END(bad_put_user) END(bad_put_user)
.section __ex_table,"a" _ASM_EXTABLE(1b,bad_put_user)
_ASM_PTR 1b,bad_put_user _ASM_EXTABLE(2b,bad_put_user)
_ASM_PTR 2b,bad_put_user _ASM_EXTABLE(3b,bad_put_user)
_ASM_PTR 3b,bad_put_user _ASM_EXTABLE(4b,bad_put_user)
_ASM_PTR 4b,bad_put_user
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
_ASM_PTR 5b,bad_put_user _ASM_EXTABLE(5b,bad_put_user)
#endif #endif
.previous
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/mmx.h> #include <asm/mmx.h>
#include <asm/asm.h>
#ifdef CONFIG_X86_INTEL_USERCOPY #ifdef CONFIG_X86_INTEL_USERCOPY
/* /*
...@@ -127,10 +128,7 @@ long strnlen_user(const char __user *s, long n) ...@@ -127,10 +128,7 @@ long strnlen_user(const char __user *s, long n)
"3: movb $1,%%al\n" "3: movb $1,%%al\n"
" jmp 1b\n" " jmp 1b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(0b,2b)
" .align 4\n"
" .long 0b,2b\n"
".previous"
:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
:"0" (n), "1" (s), "2" (0), "3" (mask) :"0" (n), "1" (s), "2" (0), "3" (mask)
:"cc"); :"cc");
...@@ -199,47 +197,44 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) ...@@ -199,47 +197,44 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
"101: lea 0(%%eax,%0,4),%0\n" "101: lea 0(%%eax,%0,4),%0\n"
" jmp 100b\n" " jmp 100b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(1b,100b)
" .align 4\n" _ASM_EXTABLE(2b,100b)
" .long 1b,100b\n" _ASM_EXTABLE(3b,100b)
" .long 2b,100b\n" _ASM_EXTABLE(4b,100b)
" .long 3b,100b\n" _ASM_EXTABLE(5b,100b)
" .long 4b,100b\n" _ASM_EXTABLE(6b,100b)
" .long 5b,100b\n" _ASM_EXTABLE(7b,100b)
" .long 6b,100b\n" _ASM_EXTABLE(8b,100b)
" .long 7b,100b\n" _ASM_EXTABLE(9b,100b)
" .long 8b,100b\n" _ASM_EXTABLE(10b,100b)
" .long 9b,100b\n" _ASM_EXTABLE(11b,100b)
" .long 10b,100b\n" _ASM_EXTABLE(12b,100b)
" .long 11b,100b\n" _ASM_EXTABLE(13b,100b)
" .long 12b,100b\n" _ASM_EXTABLE(14b,100b)
" .long 13b,100b\n" _ASM_EXTABLE(15b,100b)
" .long 14b,100b\n" _ASM_EXTABLE(16b,100b)
" .long 15b,100b\n" _ASM_EXTABLE(17b,100b)
" .long 16b,100b\n" _ASM_EXTABLE(18b,100b)
" .long 17b,100b\n" _ASM_EXTABLE(19b,100b)
" .long 18b,100b\n" _ASM_EXTABLE(20b,100b)
" .long 19b,100b\n" _ASM_EXTABLE(21b,100b)
" .long 20b,100b\n" _ASM_EXTABLE(22b,100b)
" .long 21b,100b\n" _ASM_EXTABLE(23b,100b)
" .long 22b,100b\n" _ASM_EXTABLE(24b,100b)
" .long 23b,100b\n" _ASM_EXTABLE(25b,100b)
" .long 24b,100b\n" _ASM_EXTABLE(26b,100b)
" .long 25b,100b\n" _ASM_EXTABLE(27b,100b)
" .long 26b,100b\n" _ASM_EXTABLE(28b,100b)
" .long 27b,100b\n" _ASM_EXTABLE(29b,100b)
" .long 28b,100b\n" _ASM_EXTABLE(30b,100b)
" .long 29b,100b\n" _ASM_EXTABLE(31b,100b)
" .long 30b,100b\n" _ASM_EXTABLE(32b,100b)
" .long 31b,100b\n" _ASM_EXTABLE(33b,100b)
" .long 32b,100b\n" _ASM_EXTABLE(34b,100b)
" .long 33b,100b\n" _ASM_EXTABLE(35b,100b)
" .long 34b,100b\n" _ASM_EXTABLE(36b,100b)
" .long 35b,100b\n" _ASM_EXTABLE(37b,100b)
" .long 36b,100b\n" _ASM_EXTABLE(99b,101b)
" .long 37b,100b\n"
" .long 99b,101b\n"
".previous"
: "=&c"(size), "=&D" (d0), "=&S" (d1) : "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size) : "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory"); : "eax", "edx", "memory");
...@@ -312,29 +307,26 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) ...@@ -312,29 +307,26 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
" popl %0\n" " popl %0\n"
" jmp 8b\n" " jmp 8b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(0b,16b)
" .align 4\n" _ASM_EXTABLE(1b,16b)
" .long 0b,16b\n" _ASM_EXTABLE(2b,16b)
" .long 1b,16b\n" _ASM_EXTABLE(21b,16b)
" .long 2b,16b\n" _ASM_EXTABLE(3b,16b)
" .long 21b,16b\n" _ASM_EXTABLE(31b,16b)
" .long 3b,16b\n" _ASM_EXTABLE(4b,16b)
" .long 31b,16b\n" _ASM_EXTABLE(41b,16b)
" .long 4b,16b\n" _ASM_EXTABLE(10b,16b)
" .long 41b,16b\n" _ASM_EXTABLE(51b,16b)
" .long 10b,16b\n" _ASM_EXTABLE(11b,16b)
" .long 51b,16b\n" _ASM_EXTABLE(61b,16b)
" .long 11b,16b\n" _ASM_EXTABLE(12b,16b)
" .long 61b,16b\n" _ASM_EXTABLE(71b,16b)
" .long 12b,16b\n" _ASM_EXTABLE(13b,16b)
" .long 71b,16b\n" _ASM_EXTABLE(81b,16b)
" .long 13b,16b\n" _ASM_EXTABLE(14b,16b)
" .long 81b,16b\n" _ASM_EXTABLE(91b,16b)
" .long 14b,16b\n" _ASM_EXTABLE(6b,9b)
" .long 91b,16b\n" _ASM_EXTABLE(7b,16b)
" .long 6b,9b\n"
" .long 7b,16b\n"
".previous"
: "=&c"(size), "=&D" (d0), "=&S" (d1) : "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size) : "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory"); : "eax", "edx", "memory");
...@@ -414,29 +406,26 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, ...@@ -414,29 +406,26 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
" popl %0\n" " popl %0\n"
" jmp 8b\n" " jmp 8b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(0b,16b)
" .align 4\n" _ASM_EXTABLE(1b,16b)
" .long 0b,16b\n" _ASM_EXTABLE(2b,16b)
" .long 1b,16b\n" _ASM_EXTABLE(21b,16b)
" .long 2b,16b\n" _ASM_EXTABLE(3b,16b)
" .long 21b,16b\n" _ASM_EXTABLE(31b,16b)
" .long 3b,16b\n" _ASM_EXTABLE(4b,16b)
" .long 31b,16b\n" _ASM_EXTABLE(41b,16b)
" .long 4b,16b\n" _ASM_EXTABLE(10b,16b)
" .long 41b,16b\n" _ASM_EXTABLE(51b,16b)
" .long 10b,16b\n" _ASM_EXTABLE(11b,16b)
" .long 51b,16b\n" _ASM_EXTABLE(61b,16b)
" .long 11b,16b\n" _ASM_EXTABLE(12b,16b)
" .long 61b,16b\n" _ASM_EXTABLE(71b,16b)
" .long 12b,16b\n" _ASM_EXTABLE(13b,16b)
" .long 71b,16b\n" _ASM_EXTABLE(81b,16b)
" .long 13b,16b\n" _ASM_EXTABLE(14b,16b)
" .long 81b,16b\n" _ASM_EXTABLE(91b,16b)
" .long 14b,16b\n" _ASM_EXTABLE(6b,9b)
" .long 91b,16b\n" _ASM_EXTABLE(7b,16b)
" .long 6b,9b\n"
" .long 7b,16b\n"
".previous"
: "=&c"(size), "=&D" (d0), "=&S" (d1) : "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size) : "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory"); : "eax", "edx", "memory");
...@@ -505,29 +494,26 @@ static unsigned long __copy_user_intel_nocache(void *to, ...@@ -505,29 +494,26 @@ static unsigned long __copy_user_intel_nocache(void *to,
"9: lea 0(%%eax,%0,4),%0\n" "9: lea 0(%%eax,%0,4),%0\n"
"16: jmp 8b\n" "16: jmp 8b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" _ASM_EXTABLE(0b,16b)
" .align 4\n" _ASM_EXTABLE(1b,16b)
" .long 0b,16b\n" _ASM_EXTABLE(2b,16b)
" .long 1b,16b\n" _ASM_EXTABLE(21b,16b)
" .long 2b,16b\n" _ASM_EXTABLE(3b,16b)
" .long 21b,16b\n" _ASM_EXTABLE(31b,16b)
" .long 3b,16b\n" _ASM_EXTABLE(4b,16b)
" .long 31b,16b\n" _ASM_EXTABLE(41b,16b)
" .long 4b,16b\n" _ASM_EXTABLE(10b,16b)
" .long 41b,16b\n" _ASM_EXTABLE(51b,16b)
" .long 10b,16b\n" _ASM_EXTABLE(11b,16b)
" .long 51b,16b\n" _ASM_EXTABLE(61b,16b)
" .long 11b,16b\n" _ASM_EXTABLE(12b,16b)
" .long 61b,16b\n" _ASM_EXTABLE(71b,16b)
" .long 12b,16b\n" _ASM_EXTABLE(13b,16b)
" .long 71b,16b\n" _ASM_EXTABLE(81b,16b)
" .long 13b,16b\n" _ASM_EXTABLE(14b,16b)
" .long 81b,16b\n" _ASM_EXTABLE(91b,16b)
" .long 14b,16b\n" _ASM_EXTABLE(6b,9b)
" .long 91b,16b\n" _ASM_EXTABLE(7b,16b)
" .long 6b,9b\n"
" .long 7b,16b\n"
".previous"
: "=&c"(size), "=&D" (d0), "=&S" (d1) : "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size) : "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory"); : "eax", "edx", "memory");
...@@ -574,12 +560,9 @@ do { \ ...@@ -574,12 +560,9 @@ do { \
"3: lea 0(%3,%0,4),%0\n" \ "3: lea 0(%3,%0,4),%0\n" \
" jmp 2b\n" \ " jmp 2b\n" \
".previous\n" \ ".previous\n" \
".section __ex_table,\"a\"\n" \ _ASM_EXTABLE(4b,5b) \
" .align 4\n" \ _ASM_EXTABLE(0b,3b) \
" .long 4b,5b\n" \ _ASM_EXTABLE(1b,2b) \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \ : "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \ : "memory"); \
...@@ -616,12 +599,9 @@ do { \ ...@@ -616,12 +599,9 @@ do { \
" popl %0\n" \ " popl %0\n" \
" jmp 2b\n" \ " jmp 2b\n" \
".previous\n" \ ".previous\n" \
".section __ex_table,\"a\"\n" \ _ASM_EXTABLE(4b,5b) \
" .align 4\n" \ _ASM_EXTABLE(0b,3b) \
" .long 4b,5b\n" \ _ASM_EXTABLE(1b,6b) \
" .long 0b,3b\n" \
" .long 1b,6b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \ : "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \ : "memory"); \
......
#include <linux/module.h> #include <linux/module.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/sort.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
/*
 * Decode the self-relative ->insn field of an exception table entry:
 * the entry stores the signed 32-bit offset from the field's own
 * address to the faulting instruction, so adding the field's address
 * recovers the absolute instruction pointer.
 */
static inline unsigned long
ex_insn_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}
/*
 * Decode the self-relative ->fixup field: same scheme as ex_insn_addr(),
 * yielding the absolute address of the fixup landing pad.
 */
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}
int fixup_exception(struct pt_regs *regs) int fixup_exception(struct pt_regs *regs)
{ {
const struct exception_table_entry *fixup; const struct exception_table_entry *fixup;
unsigned long new_ip;
#ifdef CONFIG_PNPBIOS #ifdef CONFIG_PNPBIOS
if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
...@@ -23,15 +35,135 @@ int fixup_exception(struct pt_regs *regs) ...@@ -23,15 +35,135 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(regs->ip); fixup = search_exception_tables(regs->ip);
if (fixup) { if (fixup) {
/* If fixup is less than 16, it means uaccess error */ new_ip = ex_fixup_addr(fixup);
if (fixup->fixup < 16) {
if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
/* Special hack for uaccess_err */
current_thread_info()->uaccess_err = 1; current_thread_info()->uaccess_err = 1;
regs->ip += fixup->fixup; new_ip -= 0x7ffffff0;
return 1;
} }
regs->ip = fixup->fixup; regs->ip = new_ip;
return 1; return 1;
} }
return 0; return 0;
} }
/* Restricted version used during very early boot */
int __init early_fixup_exception(unsigned long *ip)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(*ip);
	if (!e)
		return 0;

	/*
	 * Entries whose fixup delta carries the large uaccess-err bias
	 * cannot be handled this early; report no fixup so the caller
	 * treats it as a fatal early exception.
	 */
	if (e->fixup - e->insn >= 0x7ffffff0 - 4) {
		/* uaccess handling not supported during early boot */
		return 0;
	}

	*ip = ex_fixup_addr(e);
	return 1;
}
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	/* Classic binary search over the inclusive range [first, last]. */
	const struct exception_table_entry *lo = first;
	const struct exception_table_entry *hi = last;

	while (lo <= hi) {
		const struct exception_table_entry *probe =
			lo + ((hi - lo) >> 1);
		unsigned long insn = ex_insn_addr(probe);

		if (insn == value)
			return probe;
		if (insn < value)
			lo = probe + 1;
		else
			hi = probe - 1;
	}
	return NULL;
}
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*
*/
/* sort() comparison callback: order entries by normalized ->insn. */
static int cmp_ex(const void *a, const void *b)
{
	const struct exception_table_entry *x = a, *y = b;

	/*
	 * This value will always end up fitting in an int, because on
	 * both i386 and x86-64 the kernel symbol-reachable address
	 * space is < 2 GiB.
	 *
	 * This compare is only valid after normalization (see
	 * sort_extable(): fields rebased to be section-start-relative).
	 */
	return x->insn - y->insn;
}
void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
	struct exception_table_entry *p;
	int i;

	/*
	 * Convert all entries to being relative to the start of the section.
	 * Each 32-bit field is relative to its own address, so its meaning
	 * would change when sort() moves the entry.  Biasing every field by
	 * its byte offset within the section (i advances 4 per field) makes
	 * the values position-independent, so cmp_ex() can compare them and
	 * sort() can swap entries safely.
	 */
	i = 0;
	for (p = start; p < finish; p++) {
		p->insn += i;
		i += 4;
		p->fixup += i;
		i += 4;
	}

	sort(start, finish - start, sizeof(struct exception_table_entry),
	     cmp_ex, NULL);

	/* Denormalize: subtract the same bias to restore self-relative form. */
	i = 0;
	for (p = start; p < finish; p++) {
		p->insn -= i;
		i += 4;
		p->fixup -= i;
		i += 4;
	}
}
#ifdef CONFIG_MODULES
/*
* If the exception table is sorted, any referring to the module init
* will be at the beginning or the end.
*/
void trim_init_extable(struct module *m)
{
	/*
	 * Trim entries whose faulting instruction lies in the module's
	 * (about to be freed) init section.  The table is sorted, so such
	 * entries form a contiguous prefix...
	 */
	while (m->num_exentries &&
	       within_module_init(ex_insn_addr(&m->extable[0]), m)) {
		m->extable++;
		m->num_exentries--;
	}
	/* ...and a contiguous suffix. */
	while (m->num_exentries &&
	       within_module_init(ex_insn_addr(&m->extable[m->num_exentries-1]), m))
		m->num_exentries--;
}
#endif /* CONFIG_MODULES */
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
*/ */
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h>
/* /*
* computes a partial checksum, e.g. for TCP/UDP fragments * computes a partial checksum, e.g. for TCP/UDP fragments
...@@ -232,15 +233,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, ...@@ -232,15 +233,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define SRC(y...) \ #define SRC(y...) \
9999: y; \ 9999: y; \
.section __ex_table, "a"; \ _ASM_EXTABLE(9999b, 6001f)
.long 9999b, 6001f ; \
.previous
#define DST(y...) \ #define DST(y...) \
9999: y; \ 9999: y; \
.section __ex_table, "a"; \ _ASM_EXTABLE(9999b, 6002f)
.long 9999b, 6002f ; \
.previous
.align 4 .align 4
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/asm.h>
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
...@@ -137,10 +138,7 @@ iret_restore_end: ...@@ -137,10 +138,7 @@ iret_restore_end:
1: iret 1: iret
xen_iret_end_crit: xen_iret_end_crit:
.section __ex_table, "a" _ASM_EXTABLE(1b, iret_exc)
.align 4
.long 1b, iret_exc
.previous
hyper_iret: hyper_iret:
/* put this out of line since its very rarely used */ /* put this out of line since its very rarely used */
......
...@@ -27,6 +27,9 @@ config IRQ_WORK ...@@ -27,6 +27,9 @@ config IRQ_WORK
bool bool
depends on HAVE_IRQ_WORK depends on HAVE_IRQ_WORK
config BUILDTIME_EXTABLE_SORT
bool
menu "General setup" menu "General setup"
config EXPERIMENTAL config EXPERIMENTAL
......
...@@ -35,10 +35,16 @@ DEFINE_MUTEX(text_mutex); ...@@ -35,10 +35,16 @@ DEFINE_MUTEX(text_mutex);
extern struct exception_table_entry __start___ex_table[]; extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[]; extern struct exception_table_entry __stop___ex_table[];
/* Cleared by build time tools if the table is already sorted. */
u32 __initdata main_extable_sort_needed = 1;
/* Sort the kernel's built-in exception table */ /* Sort the kernel's built-in exception table */
void __init sort_main_extable(void) void __init sort_main_extable(void)
{ {
sort_extable(__start___ex_table, __stop___ex_table); if (main_extable_sort_needed)
sort_extable(__start___ex_table, __stop___ex_table);
else
pr_notice("__ex_table already sorted, skipping sort\n");
} }
/* Given an address, look for it in the exception tables. */ /* Given an address, look for it in the exception tables. */
......
...@@ -9,3 +9,4 @@ unifdef ...@@ -9,3 +9,4 @@ unifdef
ihex2fw ihex2fw
recordmcount recordmcount
docproc docproc
sortextable
...@@ -15,6 +15,9 @@ hostprogs-$(CONFIG_LOGO) += pnmtologo ...@@ -15,6 +15,9 @@ hostprogs-$(CONFIG_LOGO) += pnmtologo
hostprogs-$(CONFIG_VT) += conmakehash hostprogs-$(CONFIG_VT) += conmakehash
hostprogs-$(CONFIG_IKCONFIG) += bin2c hostprogs-$(CONFIG_IKCONFIG) += bin2c
hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
hostprogs-$(CONFIG_BUILDTIME_EXTABLE_SORT) += sortextable
HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
always := $(hostprogs-y) $(hostprogs-m) always := $(hostprogs-y) $(hostprogs-m)
......
/*
* sortextable.c: Sort the kernel's exception table
*
* Copyright 2011 - 2012 Cavium, Inc.
*
* Based on code taken from recortmcount.c which is:
*
* Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
* Licensed under the GNU General Public License, version 2 (GPLv2).
*
* Restructured to fit Linux format, as well as other updates:
* Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
*/
/*
* Strategy: alter the vmlinux file in-place.
*/
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
static struct stat sb; /* Remember .st_size, etc. */
static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
/* setjmp() return values */
enum {
SJ_SETJMP = 0, /* hardwired first return */
SJ_FAIL,
SJ_SUCCEED
};
/* Per-file resource cleanup when multiple files. */
static void
cleanup(void)
{
	if (!mmap_failed)
		munmap(ehdr_curr, sb.st_size);
	/*
	 * fd_map is -1 when open() itself failed (main() pre-sets it
	 * before each file); don't close() an invalid descriptor.
	 */
	if (fd_map >= 0)
		close(fd_map);
}
/*
 * Abort processing of the current file: release its resources and
 * long-jump back to the per-file dispatcher in main() with SJ_FAIL.
 */
static void __attribute__((noreturn))
fail_file(void)
{
	cleanup();
	longjmp(jmpenv, SJ_FAIL);
}
/*
 * Finish the current file early: release its resources and long-jump
 * back to the per-file dispatcher in main() with SJ_SUCCEED.
 */
static void __attribute__((noreturn))
succeed_file(void)
{
	cleanup();
	longjmp(jmpenv, SJ_SUCCEED);
}
/*
* Get the whole file as a programming convenience in order to avoid
* malloc+lseek+read+free of many pieces. If successful, then mmap
* avoids copying unused pieces; else just read the whole file.
* Open for both read and write.
*/
static void *mmap_file(char const *fname)
{
	void *addr;

	fd_map = open(fname, O_RDWR);
	if (fd_map < 0 || fstat(fd_map, &sb) < 0) {
		perror(fname);
		fail_file();
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		fail_file();
	}
	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED,
		    fd_map, 0);
	if (addr == MAP_FAILED) {
		mmap_failed = 1;
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		fail_file();
	}
	/*
	 * Record that the mapping succeeded.  main() pre-sets mmap_failed
	 * to 1 before each file; without clearing it here, cleanup()'s
	 * "if (!mmap_failed) munmap(...)" could never run and every
	 * successfully mapped file would leak its mapping.
	 */
	mmap_failed = 0;
	return addr;
}
/*
 * Endianness-explicit readers for possibly-unaligned fields of the
 * mapped ELF image.  do_file() installs either the "be" or the "le"
 * set into the r/r2/r8 function pointers to match e_ident[EI_DATA].
 */

/* Read a big-endian 64-bit value. */
static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}
/* Read a big-endian 32-bit value. */
static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}
/* Read a big-endian 16-bit value. */
static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}
/* Read a little-endian 64-bit value. */
static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}
/* Read a little-endian 32-bit value. */
static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}
/* Read a little-endian 16-bit value. */
static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}
/*
 * Matching endianness-explicit writers; installed into the w/w2/w8
 * function pointers alongside the readers above.
 */

/* Write a big-endian 64-bit value. */
static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}
/* Write a big-endian 32-bit value. */
static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}
/* Write a big-endian 16-bit value. */
static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}
/* Write a little-endian 64-bit value. */
static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}
/* Write a little-endian 32-bit value. */
static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}
/* Write a little-endian 16-bit value. */
static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}
static uint64_t (*r8)(const uint64_t *);
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
typedef void (*table_sort_t)(char *, int);
/* 32 bit and 64 bit are very similar */
#include "sortextable.h"
#define SORTEXTABLE_64
#include "sortextable.h"
/*
 * qsort() comparator for x86 extable entries: order by the (normalized)
 * signed 32-bit insn field, read through the endian-aware accessor.
 */
static int compare_x86_table(const void *a, const void *b)
{
	int32_t lhs = (int32_t)r(a);
	int32_t rhs = (int32_t)r(b);

	/* Three-way compare yielding -1, 0 or 1. */
	return (lhs > rhs) - (lhs < rhs);
}
static void sort_x86_table(char *extab_image, int image_size)
{
	int offset;

	/*
	 * Do the same thing the runtime sort does: first rebase every
	 * self-relative 32-bit field to be relative to the start of the
	 * section, so entries can be compared and moved freely.
	 */
	for (offset = 0; offset < image_size; offset += 4) {
		uint32_t *field = (uint32_t *)(extab_image + offset);

		w(r(field) + offset, field);
	}

	qsort(extab_image, image_size / 8, 8, compare_x86_table);

	/* Now denormalize: restore the self-relative encoding. */
	for (offset = 0; offset < image_size; offset += 4) {
		uint32_t *field = (uint32_t *)(extab_image + offset);

		w(r(field) - offset, field);
	}
}
static void
do_file(char const *const fname)
{
table_sort_t custom_sort;
Elf32_Ehdr *ehdr = mmap_file(fname);
ehdr_curr = ehdr;
switch (ehdr->e_ident[EI_DATA]) {
default:
fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
ehdr->e_ident[EI_DATA], fname);
fail_file();
break;
case ELFDATA2LSB:
r = rle;
r2 = r2le;
r8 = r8le;
w = wle;
w2 = w2le;
w8 = w8le;
break;
case ELFDATA2MSB:
r = rbe;
r2 = r2be;
r8 = r8be;
w = wbe;
w2 = w2be;
w8 = w8be;
break;
} /* end switch */
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
|| r2(&ehdr->e_type) != ET_EXEC
|| ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
fprintf(stderr, "unrecognized ET_EXEC file %s\n", fname);
fail_file();
}
custom_sort = NULL;
switch (r2(&ehdr->e_machine)) {
default:
fprintf(stderr, "unrecognized e_machine %d %s\n",
r2(&ehdr->e_machine), fname);
fail_file();
break;
case EM_386:
case EM_X86_64:
custom_sort = sort_x86_table;
break;
case EM_MIPS:
break;
} /* end switch */
switch (ehdr->e_ident[EI_CLASS]) {
default:
fprintf(stderr, "unrecognized ELF class %d %s\n",
ehdr->e_ident[EI_CLASS], fname);
fail_file();
break;
case ELFCLASS32:
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
|| r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
fprintf(stderr,
"unrecognized ET_EXEC file: %s\n", fname);
fail_file();
}
do32(ehdr, fname, custom_sort);
break;
case ELFCLASS64: {
Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
|| r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
fprintf(stderr,
"unrecognized ET_EXEC file: %s\n", fname);
fail_file();
}
do64(ghdr, fname, custom_sort);
break;
}
} /* end switch */
cleanup();
}
int
main(int argc, char *argv[])
{
	int n_error = 0;	/* gcc-4.3.0 false positive complaint */
	int i;

	if (argc < 2) {
		fprintf(stderr, "usage: sortextable vmlinux...\n");
		/*
		 * A usage error is a failure: returning 0 here would let a
		 * miswired build rule "succeed" without sorting anything.
		 */
		return 1;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		char *file = argv[i];
		int const sjval = setjmp(jmpenv);

		switch (sjval) {
		default:
			fprintf(stderr, "internal error: %s\n", file);
			exit(1);
			break;
		case SJ_SETJMP:	/* normal sequence */
			/* Avoid problems if early cleanup() */
			fd_map = -1;
			ehdr_curr = NULL;
			mmap_failed = 1;
			do_file(file);
			break;
		case SJ_FAIL:	/* error in do_file or below */
			++n_error;
			break;
		case SJ_SUCCEED:	/* premature success */
			/* do nothing */
			break;
		}  /* end switch */
	}
	return !!n_error;
}
/*
* sortextable.h
*
* Copyright 2011 - 2012 Cavium, Inc.
*
* Some of this code was taken out of recordmcount.h written by:
*
* Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
* Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
*
*
* Licensed under the GNU General Public License, version 2 (GPLv2).
*/
/*
 * Macro templating: sortextable.c includes this header twice, once with
 * SORTEXTABLE_64 defined and once without, producing a 64-bit and a
 * 32-bit instantiation of compare_extable()/do_func().  Undefine every
 * template name first so the second inclusion starts from a clean slate.
 */
#undef extable_ent_size
#undef compare_extable
#undef do_func
#undef Elf_Addr
#undef Elf_Ehdr
#undef Elf_Shdr
#undef Elf_Rel
#undef Elf_Rela
#undef Elf_Sym
#undef ELF_R_SYM
#undef Elf_r_sym
#undef ELF_R_INFO
#undef Elf_r_info
#undef ELF_ST_BIND
#undef ELF_ST_TYPE
#undef fn_ELF_R_SYM
#undef fn_ELF_R_INFO
#undef uint_t
#undef _r
#undef _w
#ifdef SORTEXTABLE_64
/* 64-bit ELF flavour: 8-byte addresses, so an extable entry is 16 bytes. */
# define extable_ent_size 16
# define compare_extable compare_extable_64
# define do_func do64
# define Elf_Addr Elf64_Addr
# define Elf_Ehdr Elf64_Ehdr
# define Elf_Shdr Elf64_Shdr
# define Elf_Rel Elf64_Rel
# define Elf_Rela Elf64_Rela
# define Elf_Sym Elf64_Sym
# define ELF_R_SYM ELF64_R_SYM
# define Elf_r_sym Elf64_r_sym
# define ELF_R_INFO ELF64_R_INFO
# define Elf_r_info Elf64_r_info
# define ELF_ST_BIND ELF64_ST_BIND
# define ELF_ST_TYPE ELF64_ST_TYPE
# define fn_ELF_R_SYM fn_ELF64_R_SYM
# define fn_ELF_R_INFO fn_ELF64_R_INFO
# define uint_t uint64_t
/* _r/_w: endian-aware 8-byte read/write accessors defined in sortextable.c. */
# define _r r8
# define _w w8
#else
/* 32-bit ELF flavour: 4-byte addresses, so an extable entry is 8 bytes. */
# define extable_ent_size 8
# define compare_extable compare_extable_32
# define do_func do32
# define Elf_Addr Elf32_Addr
# define Elf_Ehdr Elf32_Ehdr
# define Elf_Shdr Elf32_Shdr
# define Elf_Rel Elf32_Rel
# define Elf_Rela Elf32_Rela
# define Elf_Sym Elf32_Sym
# define ELF_R_SYM ELF32_R_SYM
# define Elf_r_sym Elf32_r_sym
# define ELF_R_INFO ELF32_R_INFO
# define Elf_r_info Elf32_r_info
# define ELF_ST_BIND ELF32_ST_BIND
# define ELF_ST_TYPE ELF32_ST_TYPE
# define fn_ELF_R_SYM fn_ELF32_R_SYM
# define fn_ELF_R_INFO fn_ELF32_R_INFO
# define uint_t uint32_t
/* _r/_w: endian-aware 4-byte read/write accessors defined in sortextable.c. */
# define _r r
# define _w w
#endif
static int compare_extable(const void *a, const void *b)
{
	/*
	 * qsort() comparator: order extable entries by their first
	 * (address-sized) field, read through the endian-aware accessor.
	 * The (x > y) - (x < y) idiom avoids subtraction, which could
	 * wrap for large unsigned Elf_Addr values.
	 */
	Elf_Addr lhs = _r(a);
	Elf_Addr rhs = _r(b);

	return (lhs > rhs) - (lhs < rhs);
}
/*
 * do_func - sort the __ex_table section of one mmap'ed ELF image in place.
 * @ehdr:        pointer to the (writable) mapping of the ELF header.
 * @fname:       file name, used for diagnostics only.
 * @custom_sort: arch-specific sort routine, or NULL to qsort() the table
 *               as a flat array of extable_ent_size-byte entries.
 *
 * Finds __ex_table, .symtab and .strtab, sorts the exception table,
 * wipes any relocations targeting it (the sorted contents are final),
 * and clears the main_extable_sort_needed flag inside the image so the
 * kernel can skip its run-time sort.  On any missing section or symbol
 * it reports the problem and calls fail_file(), which longjmps back to
 * main().
 *
 * Instantiated twice (32-bit and 64-bit) via the sortextable.h macros.
 */
static void
do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
{
	Elf_Shdr *shdr;
	Elf_Shdr *shstrtab_sec;
	Elf_Shdr *strtab_sec = NULL;
	Elf_Shdr *symtab_sec = NULL;
	Elf_Shdr *extab_sec = NULL;
	Elf_Sym *sym;
	Elf_Sym *symtab;
	Elf_Sym *sort_needed_sym;
	Elf_Shdr *sort_needed_sec;
	Elf_Rel *relocs = NULL;
	int relocs_size = 0;	/* only meaningful when relocs != NULL */
	uint32_t *sort_done_location;
	const char *secstrtab;
	const char *strtab;
	char *extab_image;
	int extab_index = 0;
	int i;
	int idx;

	shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
	shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
	secstrtab = (const char *)ehdr + _r(&shstrtab_sec->sh_offset);

	/* Single pass over the section headers to locate what we need. */
	for (i = 0; i < r2(&ehdr->e_shnum); i++) {
		idx = r(&shdr[i].sh_name);
		if (strcmp(secstrtab + idx, "__ex_table") == 0) {
			extab_sec = shdr + i;
			extab_index = i;
		}
		/*
		 * Remember relocations aimed at __ex_table.  NOTE: this
		 * relies on __ex_table's header preceding its REL/RELA
		 * section, since extab_index is still 0 (an invalid
		 * target) until __ex_table has been seen.
		 */
		if ((r(&shdr[i].sh_type) == SHT_REL ||
		     r(&shdr[i].sh_type) == SHT_RELA) &&
		    r(&shdr[i].sh_info) == extab_index) {
			relocs = (void *)ehdr + _r(&shdr[i].sh_offset);
			relocs_size = _r(&shdr[i].sh_size);
		}
		if (strcmp(secstrtab + idx, ".symtab") == 0)
			symtab_sec = shdr + i;
		if (strcmp(secstrtab + idx, ".strtab") == 0)
			strtab_sec = shdr + i;
	}
	if (strtab_sec == NULL) {
		fprintf(stderr, "no .strtab in file: %s\n", fname);
		fail_file();
	}
	if (symtab_sec == NULL) {
		fprintf(stderr, "no .symtab in file: %s\n", fname);
		fail_file();
	}
	if (extab_sec == NULL) {
		fprintf(stderr, "no __ex_table in file: %s\n", fname);
		fail_file();
	}
	strtab = (const char *)ehdr + _r(&strtab_sec->sh_offset);
	extab_image = (void *)ehdr + _r(&extab_sec->sh_offset);

	/* Sort the table: arch hook if provided, plain qsort otherwise. */
	if (custom_sort) {
		custom_sort(extab_image, _r(&extab_sec->sh_size));
	} else {
		int num_entries = _r(&extab_sec->sh_size) / extable_ent_size;
		qsort(extab_image, num_entries,
		      extable_ent_size, compare_extable);
	}
	/* If there were relocations, we no longer need them. */
	if (relocs)
		memset(relocs, 0, relocs_size);

	/* Find the main_extable_sort_needed flag in the symbol table. */
	sort_needed_sym = NULL;
	symtab = (void *)ehdr + _r(&symtab_sec->sh_offset); /* loop invariant */
	for (i = 0; i < _r(&symtab_sec->sh_size) / sizeof(Elf_Sym); i++) {
		sym = symtab + i;
		if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;
		idx = r(&sym->st_name);
		if (strcmp(strtab + idx, "main_extable_sort_needed") == 0) {
			sort_needed_sym = sym;
			break;
		}
	}
	if (sort_needed_sym == NULL) {
		fprintf(stderr,
			"no main_extable_sort_needed symbol in file: %s\n",
			fname);
		fail_file();
	}
	/* Translate the symbol's virtual address into a file offset. */
	sort_needed_sec = &shdr[r2(&sort_needed_sym->st_shndx)];
	sort_done_location = (void *)ehdr +
		_r(&sort_needed_sec->sh_offset) +
		_r(&sort_needed_sym->st_value) -
		_r(&sort_needed_sec->sh_addr);

#if 1	/* debug aid: report the flag's offset within the file */
	printf("sort done marker at %lx\n",
	       (unsigned long)((char *)sort_done_location - (char *)ehdr));
#endif
	/* We sorted it, clear the flag. */
	w(0, sort_done_location);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment