Commit 3fb9268e authored by Linus Torvalds

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - unwinder fixes and enhancements

   - improve ftrace interaction with the unwinder

   - optimize the code footprint of WARN() and related debugging
     constructs

   - ... plus misc updates, cleanups and fixes"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/unwind: Dump all stacks in unwind_dump()
  x86/unwind: Silence more entry-code related warnings
  x86/ftrace: Fix ebp in ftrace_regs_caller that screws up unwinder
  x86/unwind: Remove unused 'sp' parameter in unwind_dump()
  x86/unwind: Prepend hex mask value with '0x' in unwind_dump()
  x86/unwind: Properly zero-pad 32-bit values in unwind_dump()
  x86/unwind: Ensure stack pointer is aligned
  debug: Avoid setting BUGFLAG_WARNING twice
  x86/unwind: Silence entry-related warnings
  x86/unwind: Read stack return address in update_stack_state()
  x86/unwind: Move common code into update_stack_state()
  debug: Fix __bug_table[] in arch linker scripts
  debug: Add _ONCE() logic to report_bug()
  x86/debug: Define BUG() again for !CONFIG_BUG
  x86/debug: Implement __WARN() using UD0
  x86/ftrace: Use Makefile logic instead of #ifdef for compiling ftrace_*.o
  x86/ftrace: Add -mfentry support to x86_32 with DYNAMIC_FTRACE set
  x86/ftrace: Clean up ftrace_regs_caller
  x86/ftrace: Add stack frame pointer to ftrace_caller
  x86/ftrace: Move the ftrace specific code out of entry_32.S
  ...
parents 12ca7c8d 262fa734
@@ -242,6 +242,8 @@ SECTIONS
 	}
 	_edata_loc = __data_loc + SIZEOF(.data);
 
+	BUG_TABLE
+
 #ifdef CONFIG_HAVE_TCM
 	/*
 	 * We align everything to a page boundary so we can
...
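The BUG_TABLE hunks in this and the following linker scripts all splice the same macro into each architecture's initialized-data region. For orientation, this is roughly how include/asm-generic/vmlinux.lds.h defines it in this era (a sketch; the exact definition may differ slightly by kernel version):

    #define BUG_TABLE							\
    	. = ALIGN(8);							\
    	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
    		VMLINUX_SYMBOL(__start___bug_table) = .;		\
    		*(__bug_table)						\
    		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
    	}

lib/bug.c walks the entries between __start___bug_table and __stop___bug_table (see its hunk at the end of this diff); the section has to land in writable data now that report_bug() stores a flag back into it.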
@@ -262,6 +262,8 @@ SECTIONS
 	}
 	_edata_loc = __data_loc + SIZEOF(.data);
 
+	BUG_TABLE
+
 #ifdef CONFIG_HAVE_TCM
 	/*
 	 * We align everything to a page boundary so we can
...
@@ -55,7 +55,7 @@ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
 	unreachable();					\
 } while (0)
 
-#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_FLAGS(flags) _BUG_FLAGS(BUGFLAG_WARNING|(flags))
 
 #endif /* ! CONFIG_GENERIC_BUG */
...
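This hunk and the matching parisc, powerpc, s390 and sh hunks below make the same mechanical conversion: each architecture now exports __WARN_FLAGS(flags) with BUGFLAG_WARNING folded in, and asm-generic/bug.h derives the taint variants once, taken verbatim from its hunk near the end of this diff:

    #define __WARN_TAINT(taint)		__WARN_FLAGS(BUGFLAG_TAINT(taint))
    #define __WARN_ONCE_TAINT(taint)	__WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))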
@@ -115,6 +115,8 @@ SECTIONS
 	__data_lma = LOADADDR(.data);
 	__data_len = SIZEOF(.data);
 
+	BUG_TABLE
+
 	/* The init section should be last, so when we free it, it goes into
 	 * the general memory pool, and (hopefully) will decrease fragmentation
 	 * a tiny bit. The init section has a _requirement_ that it be
...
@@ -128,6 +128,8 @@ SECTIONS
 		. = ALIGN(8);
 	}
 
+	BUG_TABLE
+
 	_edata = .;
 
 	__bss_start = .;
...
@@ -68,6 +68,8 @@ SECTIONS
 	__edata = . ;			/* End of data section. */
 	_edata = . ;
 
+	BUG_TABLE
+
 	INIT_TASK_DATA_SECTION(PAGE_SIZE)
 
 	. = ALIGN(PAGE_SIZE);		/* Init code and data. */
...
@@ -102,6 +102,8 @@ SECTIONS
 
 	_edata = .;			/* End of data section */
 
+	BUG_TABLE
+
 	/* GP section */
 	. = ALIGN(L1_CACHE_BYTES);
 	_gp = . + 2048;
...
@@ -192,6 +192,8 @@ SECTIONS {
 		CONSTRUCTORS
 	}
 
+	BUG_TABLE
+
 	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
 		*(.got.plt)
...
@@ -97,6 +97,7 @@ SECTIONS
 		DATA_DATA
 		CONSTRUCTORS
 	}
+	BUG_TABLE
 	_gp = . + 0x8000;
 	.lit8 : {
 		*(.lit8)
...
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __WARN_TAINT(taint)					\
+#define __WARN_FLAGS(flags)					\
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
@@ -56,11 +56,11 @@
 			     "\t.org 2b+%c3\n"			\
 			     "\t.popsection"			\
 			     : : "i" (__FILE__), "i" (__LINE__), \
-			     "i" (BUGFLAG_TAINT(taint)),	\
+			     "i" (BUGFLAG_WARNING|(flags)),	\
 			     "i" (sizeof(struct bug_entry)) );	\
 	} while(0)
 #else
-#define __WARN_TAINT(taint)					\
+#define __WARN_FLAGS(flags)					\
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
@@ -69,7 +69,7 @@
 			     "\t.short %c0\n"			\
 			     "\t.org 2b+%c1\n"			\
 			     "\t.popsection"			\
-			     : : "i" (BUGFLAG_TAINT(taint)),	\
+			     : : "i" (BUGFLAG_WARNING|(flags)),	\
 			     "i" (sizeof(struct bug_entry)) );	\
 	} while(0)
 #endif
...
@@ -85,12 +85,12 @@
 	}							\
 } while (0)
 
-#define __WARN_TAINT(taint) do {				\
+#define __WARN_FLAGS(flags) do {				\
 	__asm__ __volatile__(					\
 		"1:	twi 31,0,0\n"				\
 		_EMIT_BUG_ENTRY					\
 		: : "i" (__FILE__), "i" (__LINE__),		\
-		  "i" (BUGFLAG_TAINT(taint)),			\
+		  "i" (BUGFLAG_WARNING|(flags)),		\
 		  "i" (sizeof(struct bug_entry)));		\
 } while (0)
...
@@ -312,6 +312,8 @@ SECTIONS
 		NOSAVE_DATA
 	}
 
+	BUG_TABLE
+
 	. = ALIGN(PAGE_SIZE);
 	_edata  =  .;
 	PROVIDE32 (edata = .);
...
@@ -46,8 +46,8 @@
 	unreachable();					\
 } while (0)
 
-#define __WARN_TAINT(taint) do {			\
-	__EMIT_BUG(BUGFLAG_TAINT(taint));		\
+#define __WARN_FLAGS(flags) do {			\
+	__EMIT_BUG(BUGFLAG_WARNING|(flags));		\
 } while (0)
 
 #define WARN_ON(x) ({					\
...
@@ -50,7 +50,7 @@ do {							\
 		   "i" (sizeof(struct bug_entry)));	\
 } while (0)
 
-#define __WARN_TAINT(taint)				\
+#define __WARN_FLAGS(flags)				\
 do {							\
 	__asm__ __volatile__ (				\
 		"1:\t.short %O0\n"			\
@@ -59,7 +59,7 @@ do {							\
 		 : "n" (TRAPA_BUG_OPCODE),		\
 		   "i" (__FILE__),			\
 		   "i" (__LINE__),			\
-		   "i" (BUGFLAG_TAINT(taint)),		\
+		   "i" (BUGFLAG_WARNING|(flags)),	\
 		   "i" (sizeof(struct bug_entry)));	\
 } while (0)
...
@@ -50,11 +50,6 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
-config GENERIC_BUG
-	bool
-	default y
-	depends on BUG
-
 config HZ
 	int
 	default 100
...
@@ -126,7 +126,7 @@ config X86
 	select HAVE_EBPF_JIT			if X86_64
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_EXIT_THREAD
-	select HAVE_FENTRY			if X86_64
+	select HAVE_FENTRY			if X86_64 || DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
...
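HAVE_FENTRY was previously 64-bit only; with DYNAMIC_FTRACE it can now be used on x86_32 as well. The distinction, sketched below as assumed compiler behavior rather than anything in this diff: plain -pg emits the mcount call after the frame is set up, while -pg -mfentry makes it the very first instruction of the function:

    /* Sketch of compiler-generated instrumentation (assumed gcc behavior):
     *
     *   -pg:            push %ebp; mov %esp,%ebp; ... ; call mcount
     *   -pg -mfentry:   call __fentry__; push %ebp; mov %esp,%ebp; ...
     *
     * With fentry the hook runs before any stack frame exists, which is
     * why the new 32-bit ftrace code below builds one by hand when
     * CC_USING_FENTRY is defined.
     */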
@@ -35,16 +35,13 @@
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page_types.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>
 #include <asm/frame.h>
 
 	.section .entry.text, "ax"
@@ -585,7 +582,7 @@ ENTRY(iret_exc )
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the IRET:
 	 */
-	DISABLE_INTERRUPTS(CLBR_EAX)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	lss	(%esp), %esp			/* switch to espfix segment */
 	jmp	.Lrestore_nocheck
 #endif
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(mcount)
-	ret
-END(mcount)
-
-ENTRY(ftrace_caller)
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	pushl	$0			/* Pass NULL as regs pointer */
-	movl	4*4(%esp), %eax
-	movl	0x4(%ebp), %edx
-	movl	function_trace_op, %ecx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-.globl ftrace_call
-ftrace_call:
-	call	ftrace_stub
-
-	addl	$4, %esp		/* skip NULL pointer */
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-.Lftrace_ret:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
-	jmp	ftrace_stub
-#endif
-
-/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
-	ret
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-	pushf	/* push flags before compare (in cs location) */
-
-	/*
-	 * i386 does not save SS and ESP when coming from kernel.
-	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
-	 * Unfortunately, that means eflags must be at the same location
-	 * as the current return ip is. We move the return ip into the
-	 * ip location, and move flags into the return ip location.
-	 */
-	pushl	4(%esp)			/* save return ip into ip slot */
-
-	pushl	$0			/* Load 0 into orig_ax */
-	pushl	%gs
-	pushl	%fs
-	pushl	%es
-	pushl	%ds
-	pushl	%eax
-	pushl	%ebp
-	pushl	%edi
-	pushl	%esi
-	pushl	%edx
-	pushl	%ecx
-	pushl	%ebx
-
-	movl	13*4(%esp), %eax	/* Get the saved flags */
-	movl	%eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
-					/* clobbering return ip */
-	movl	$__KERNEL_CS, 13*4(%esp)
-
-	movl	12*4(%esp), %eax	/* Load ip (1st parameter) */
-	subl	$MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
-	movl	0x4(%ebp), %edx		/* Load parent ip (2nd parameter) */
-	movl	function_trace_op, %ecx	/* Save ftrace_pos in 3rd parameter */
-	pushl	%esp			/* Save pt_regs as 4th parameter */
-
-GLOBAL(ftrace_regs_call)
-	call	ftrace_stub
-
-	addl	$4, %esp		/* Skip pt_regs */
-	movl	14*4(%esp), %eax	/* Move flags back into cs */
-	movl	%eax, 13*4(%esp)	/* Needed to keep addl from modifying flags */
-	movl	12*4(%esp), %eax	/* Get return ip from regs->ip */
-	movl	%eax, 14*4(%esp)	/* Put return ip back for ret */
-
-	popl	%ebx
-	popl	%ecx
-	popl	%edx
-	popl	%esi
-	popl	%edi
-	popl	%ebp
-	popl	%eax
-	popl	%ds
-	popl	%es
-	popl	%fs
-	popl	%gs
-	addl	$8, %esp		/* Skip orig_ax and ip */
-	popf				/* Pop flags at end (no addl to corrupt flags) */
-	jmp	.Lftrace_ret
-
-	popf
-	jmp	ftrace_stub
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(mcount)
-	cmpl	$__PAGE_OFFSET, %esp
-	jb	ftrace_stub		/* Paging not enabled yet? */
-
-	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	.Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpl	$ftrace_stub, ftrace_graph_return
-	jnz	ftrace_graph_caller
-
-	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz	ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-	ret
-
-	/* taken from glibc */
-.Ltrace:
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	movl	0x4(%ebp), %edx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-	call	*ftrace_trace_function
-
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	jmp	ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(mcount)
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	lea	0x4(%ebp), %edx
-	movl	(%ebp), %ecx
-	subl	$MCOUNT_INSN_SIZE, %eax
-	call	prepare_ftrace_return
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	ret
-END(ftrace_graph_caller)
-
-.globl return_to_handler
-return_to_handler:
-	pushl	%eax
-	pushl	%edx
-	movl	%ebp, %eax
-	call	ftrace_return_to_handler
-	movl	%eax, %ecx
-	popl	%edx
-	popl	%eax
-	jmp	*%ecx
-#endif
-
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
 	ASM_CLAC
...
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
 	 * If we see that no exit work is required (which we are required
 	 * to check with IRQs off), then we can go straight to SYSRET64.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movq	PER_CPU_VAR(current_task), %r11
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
 	 * raise(3) will trigger this, for example.  IRQs are off.
 	 */
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
 	 * Called from fast path -- disable IRQs again, pop return address
 	 * and jump to slow path
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
 	jmp	entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
 	interrupt do_IRQ
 	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	decl	PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
 END(error_exit)
...
 #ifndef _ASM_X86_BUG_H
 #define _ASM_X86_BUG_H
 
-#define HAVE_ARCH_BUG
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
+#include <linux/stringify.h>
 
-#ifdef CONFIG_X86_32
-# define __BUG_C0	"2:\t.long 1b, %c0\n"
-#else
-# define __BUG_C0	"2:\t.long 1b - 2b, %c0 - 2b\n"
-#endif
+/*
+ * Since some emulators terminate on UD2, we cannot use it for WARN.
+ * Since various instruction decoders disagree on the length of UD1,
+ * we cannot use it either. So use UD0 for WARN.
+ *
+ * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
+ *  our kernel decoder thinks it takes a ModRM byte, which seems consistent
+ *  with various things like the Intel SDM instruction encoding rules)
+ */
 
-#define BUG()							\
-do {								\
-	asm volatile("1:\tud2\n"				\
-		     ".pushsection __bug_table,\"a\"\n"		\
-		     __BUG_C0					\
-		     "\t.word %c1, 0\n"				\
-		     "\t.org 2b+%c2\n"				\
-		     ".popsection"				\
-		     : : "i" (__FILE__), "i" (__LINE__),	\
-		     "i" (sizeof(struct bug_entry)));		\
-	unreachable();						\
-} while (0)
+#define ASM_UD0		".byte 0x0f, 0xff"
+#define ASM_UD1		".byte 0x0f, 0xb9" /* + ModRM */
+#define ASM_UD2		".byte 0x0f, 0x0b"
+
+#define INSN_UD0	0xff0f
+#define INSN_UD2	0x0b0f
+
+#define LEN_UD0		2
+
+#ifdef CONFIG_GENERIC_BUG
+
+#ifdef CONFIG_X86_32
+# define __BUG_REL(val)	".long " __stringify(val)
+#else
+# define __BUG_REL(val)	".long " __stringify(val) " - 2b"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"a\"\n"			\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t"  __BUG_REL(%c0) "\t# bug_entry::file\n"	\
+		     "\t.word %c1"        "\t# bug_entry::line\n"	\
+		     "\t.word %c2"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c3\n"					\
+		     ".popsection"					\
+		     : : "i" (__FILE__), "i" (__LINE__),		\
+			 "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
+} while (0)
 
-#else
-#define BUG()							\
-do {								\
-	asm volatile("ud2");					\
-	unreachable();						\
-} while (0)
-#endif
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"a\"\n"			\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t.word %c0"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c1\n"					\
+		     ".popsection"					\
+		     : : "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#else
+
+#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
+
+#define HAVE_ARCH_BUG
+#define BUG()							\
+do {								\
+	_BUG_FLAGS(ASM_UD2, 0);					\
+	unreachable();						\
+} while (0)
+
+#define __WARN_FLAGS(flags)	_BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
 
 #include <asm-generic/bug.h>
...
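The `# bug_entry::...` comments in the new asm name the fields being hand-assembled. As a sketch, with CONFIG_GENERIC_BUG_RELATIVE_POINTERS (the x86_64 case, hence the " - 2b" in __BUG_REL()), the asm-generic/bug.h structure they populate looks like:

    struct bug_entry {
    	signed int	bug_addr_disp;	/* __BUG_REL(1b): where the ud instruction lives */
    #ifdef CONFIG_DEBUG_BUGVERBOSE
    	signed int	file_disp;	/* __BUG_REL(%c0): source file name */
    	unsigned short	line;		/* %c1 */
    #endif
    	unsigned short	flags;		/* %c2: BUGFLAG_* bits, taint in the high byte */
    };

On 32-bit the entries are absolute `.long` pointers instead, which is what the __BUG_REL() #ifdef above selects.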
@@ -4,6 +4,7 @@
 #include <asm/page_64_types.h>
 
 #ifndef __ASSEMBLY__
+#include <asm/alternative.h>
 
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
@@ -34,7 +35,20 @@ extern unsigned long __phys_addr_symbol(unsigned long);
 #define pfn_valid(pfn)          ((pfn) < max_pfn)
 #endif
 
-void clear_page(void *page);
+void clear_page_orig(void *page);
+void clear_page_rep(void *page);
+void clear_page_erms(void *page);
+
+static inline void clear_page(void *page)
+{
+	alternative_call_2(clear_page_orig,
+			   clear_page_rep, X86_FEATURE_REP_GOOD,
+			   clear_page_erms, X86_FEATURE_ERMS,
+			   "=D" (page),
+			   "0" (page)
+			   : "memory", "rax", "rcx");
+}
+
 void copy_page(void *to, void *from);
 
 #endif	/* !__ASSEMBLY__ */
...
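alternative_call_2() patches the call instruction at boot, so clear_page() costs one direct call to the best variant the CPU supports. Conceptually the dispatch is the following (a sketch only; clear_page_sketch is a hypothetical name, and the real selection happens by apply_alternatives() rewriting the call site, with the ERMS alternative winning when both features are present):

    static inline void clear_page_sketch(void *page)
    {
    	if (boot_cpu_has(X86_FEATURE_ERMS))
    		clear_page_erms(page);		/* rep stosb */
    	else if (boot_cpu_has(X86_FEATURE_REP_GOOD))
    		clear_page_rep(page);		/* rep stosq */
    	else
    		clear_page_orig(page);		/* unrolled 64-byte stores */
    }

Inlining the selection lets each call site be patched directly instead of paying an extra jump through a common clear_page() in clear_page_64.S (compare the hunk further down).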
@@ -12,8 +12,10 @@ struct unwind_state {
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
+	bool got_irq;
 	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
+	unsigned long ip;
 #else
 	unsigned long *sp;
 #endif
...
@@ -27,7 +27,7 @@ KASAN_SANITIZE_stacktrace.o := n
 OBJECT_FILES_NON_STANDARD_head_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o	:= y
-OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o		:= y
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_test_nx.o			:= y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
@@ -46,7 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL)	+= ldt.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)	+= irq_work.o
 obj-y			+= probe_roms.o
-obj-$(CONFIG_X86_64)	+= sys_x86_64.o mcount_64.o
+obj-$(CONFIG_X86_64)	+= sys_x86_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
 obj-y			+= bootflag.o e820.o
@@ -82,6 +82,7 @@ obj-y				+= apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_LIVEPATCH)		+= livepatch.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace_$(BITS).o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_X86_TSC)		+= trace_clock.o
...
@@ -77,7 +77,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	 *  - softirq stack
 	 *  - hardirq stack
 	 */
-	for (regs = NULL; stack; stack = stack_info.next_sp) {
+	for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
 		const char *stack_name;
 
 		/*
@@ -289,9 +289,6 @@ void die(const char *str, struct pt_regs *regs, long err)
 	unsigned long flags = oops_begin();
 	int sig = SIGSEGV;
 
-	if (!user_mode(regs))
-		report_bug(regs->ip, regs);
-
 	if (__die(str, regs, err))
 		sig = 0;
 	oops_end(flags, regs, sig);
...
@@ -162,15 +162,3 @@ void show_regs(struct pt_regs *regs)
 	}
 	pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-	unsigned short ud2;
-
-	if (ip < PAGE_OFFSET)
-		return 0;
-	if (probe_kernel_address((unsigned short *)ip, ud2))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
@@ -178,13 +178,3 @@ void show_regs(struct pt_regs *regs)
 	}
 	pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-	unsigned short ud2;
-
-	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
+/*
+ * Copyright (C) 2017  Steven Rostedt, VMware Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+#include <asm/export.h>
+#include <asm/ftrace.h>
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+EXPORT_SYMBOL(__fentry__)
+#else
+# define function_hook	mcount
+EXPORT_SYMBOL(mcount)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
+#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
+# define USING_FRAME_POINTER
+#endif
+
+#ifdef USING_FRAME_POINTER
+# define MCOUNT_FRAME			1	/* using frame = true  */
+#else
+# define MCOUNT_FRAME			0	/* using frame = false */
+#endif
+
+ENTRY(function_hook)
+	ret
+END(function_hook)
+
+ENTRY(ftrace_caller)
+
+#ifdef USING_FRAME_POINTER
+# ifdef CC_USING_FENTRY
+	/*
+	 * Frame pointers are of ip followed by bp.
+	 * Since fentry is an immediate jump, we are left with
+	 * parent-ip, function-ip. We need to add a frame with
+	 * parent-ip followed by ebp.
+	 */
+	pushl	4(%esp)				/* parent ip */
+	pushl	%ebp
+	movl	%esp, %ebp
+	pushl	2*4(%esp)			/* function ip */
+# endif
+	/* For mcount, the function ip is directly above */
+	pushl	%ebp
+	movl	%esp, %ebp
+#endif
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+	pushl	$0				/* Pass NULL as regs pointer */
+
+#ifdef USING_FRAME_POINTER
+	/* Load parent ebp into edx */
+	movl	4*4(%esp), %edx
+#else
+	/* There's no frame pointer, load the appropriate stack addr instead */
+	lea	4*4(%esp), %edx
+#endif
+
+	movl	(MCOUNT_FRAME+4)*4(%esp), %eax	/* load the rip */
+	/* Get the parent ip */
+	movl	4(%edx), %edx			/* edx has ebp */
+
+	movl	function_trace_op, %ecx
+	subl	$MCOUNT_INSN_SIZE, %eax
+
+.globl ftrace_call
+ftrace_call:
+	call	ftrace_stub
+
+	addl	$4, %esp			/* skip NULL pointer */
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+#ifdef USING_FRAME_POINTER
+	popl	%ebp
+# ifdef CC_USING_FENTRY
+	addl	$4,%esp				/* skip function ip */
+	popl	%ebp				/* this is the orig bp */
+	addl	$4, %esp			/* skip parent ip */
+# endif
+#endif
+.Lftrace_ret:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp	ftrace_stub
+#endif
+
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
+	ret
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+	/*
+	 * i386 does not save SS and ESP when coming from kernel.
+	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
+	 * Unfortunately, that means eflags must be at the same location
+	 * as the current return ip is. We move the return ip into the
+	 * regs->ip location, and move flags into the return ip location.
+	 */
+	pushl	$__KERNEL_CS
+	pushl	4(%esp)				/* Save the return ip */
+	pushl	$0				/* Load 0 into orig_ax */
+	pushl	%gs
+	pushl	%fs
+	pushl	%es
+	pushl	%ds
+	pushl	%eax
+
+	/* Get flags and place them into the return ip slot */
+	pushf
+	popl	%eax
+	movl	%eax, 8*4(%esp)
+
+	pushl	%ebp
+	pushl	%edi
+	pushl	%esi
+	pushl	%edx
+	pushl	%ecx
+	pushl	%ebx
+
+	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
+	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
+#ifdef CC_USING_FENTRY
+	movl	15*4(%esp), %edx		/* Load parent ip (2nd parameter) */
+#else
+	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
+#endif
+	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
+	pushl	%esp				/* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+	call	ftrace_stub
+
+	addl	$4, %esp			/* Skip pt_regs */
+
+	/* restore flags */
+	push	14*4(%esp)
+	popf
+
+	/* Move return ip back to its original location */
+	movl	12*4(%esp), %eax
+	movl	%eax, 14*4(%esp)
+
+	popl	%ebx
+	popl	%ecx
+	popl	%edx
+	popl	%esi
+	popl	%edi
+	popl	%ebp
+	popl	%eax
+	popl	%ds
+	popl	%es
+	popl	%fs
+	popl	%gs
+
+	/* use lea to not affect flags */
+	lea	3*4(%esp), %esp			/* Skip orig_ax, ip and cs */
+
+	jmp	.Lftrace_ret
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+	cmpl	$__PAGE_OFFSET, %esp
+	jb	ftrace_stub			/* Paging not enabled yet? */
+
+	cmpl	$ftrace_stub, ftrace_trace_function
+	jnz	.Ltrace
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpl	$ftrace_stub, ftrace_graph_return
+	jnz	ftrace_graph_caller
+
+	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
+	jnz	ftrace_graph_caller
+#endif
+.globl ftrace_stub
+ftrace_stub:
+	ret
+
+	/* taken from glibc */
+.Ltrace:
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+	movl	0xc(%esp), %eax
+	movl	0x4(%ebp), %edx
+	subl	$MCOUNT_INSN_SIZE, %eax
+
+	call	*ftrace_trace_function
+
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+	jmp	ftrace_stub
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+	movl	3*4(%esp), %eax
+	/* Even with frame pointers, fentry doesn't have one here */
+#ifdef CC_USING_FENTRY
+	lea	4*4(%esp), %edx
+	movl	$0, %ecx
+#else
+	lea	0x4(%ebp), %edx
+	movl	(%ebp), %ecx
+#endif
+	subl	$MCOUNT_INSN_SIZE, %eax
+	call	prepare_ftrace_return
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+	ret
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+	pushl	%eax
+	pushl	%edx
+#ifdef CC_USING_FENTRY
+	movl	$0, %eax
+#else
+	movl	%ebp, %eax
+#endif
+	call	ftrace_return_to_handler
+	movl	%eax, %ecx
+	popl	%edx
+	popl	%eax
+	jmp	*%ecx
+#endif
 /*
- *  linux/arch/x86_64/mcount_64.S
- *
  *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
  */
@@ -13,9 +11,6 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CONFIG_FUNCTION_TRACER
-
 #ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
@@ -297,7 +292,6 @@ trace:
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
...
@@ -169,6 +169,37 @@ void ist_end_non_atomic(void)
 	preempt_disable();
 }
 
+int is_valid_bugaddr(unsigned long addr)
+{
+	unsigned short ud;
+
+	if (addr < TASK_SIZE_MAX)
+		return 0;
+
+	if (probe_kernel_address((unsigned short *)addr, ud))
+		return 0;
+
+	return ud == INSN_UD0 || ud == INSN_UD2;
+}
+
+static int fixup_bug(struct pt_regs *regs, int trapnr)
+{
+	if (trapnr != X86_TRAP_UD)
+		return 0;
+
+	switch (report_bug(regs->ip, regs)) {
+	case BUG_TRAP_TYPE_NONE:
+	case BUG_TRAP_TYPE_BUG:
+		break;
+
+	case BUG_TRAP_TYPE_WARN:
+		regs->ip += LEN_UD0;
+		return 1;
+	}
+
+	return 0;
+}
+
 static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 		  struct pt_regs *regs, long error_code)
@@ -187,13 +218,16 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 	}
 
 	if (!user_mode(regs)) {
-		if (!fixup_exception(regs, trapnr)) {
-			tsk->thread.error_code = error_code;
-			tsk->thread.trap_nr = trapnr;
-			die(str, regs, error_code);
-		}
+		if (fixup_exception(regs, trapnr))
+			return 0;
+
+		if (fixup_bug(regs, trapnr))
+			return 0;
+
+		tsk->thread.error_code = error_code;
+		tsk->thread.trap_nr = trapnr;
+		die(str, regs, error_code);
 		return 0;
 	}
 
 	return -1;
 }
...
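Taken together with the new x86 bug.h, the WARN() path now looks roughly like this (control-flow sketch, not code from the diff):

    /*
     * WARN() emits ASM_UD0 (0x0f 0xff) plus a __bug_table entry.
     * Executing it raises #UD and lands in do_trap_no_signal():
     *
     *   if (fixup_exception(regs, trapnr))	// ordinary exception fixups first
     *           return 0;
     *   if (fixup_bug(regs, trapnr))		// report_bug() prints the warning...
     *           return 0;			// ...then ip += LEN_UD0 skips the 2-byte trap
     *   die(str, regs, error_code);		// a real BUG() (UD2) still ends up here
     */

Compared with the old out-of-line __warn() call, a warning that actually fires costs a trap, but every WARN site shrinks to a two-byte instruction plus a table entry, which is the "code footprint" win cited in the merge message.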
@@ -34,7 +34,7 @@ bool unwind_next_frame(struct unwind_state *state)
 				return true;
 		}
 
-		state->sp = info->next_sp;
+		state->sp = PTR_ALIGN(info->next_sp, sizeof(long));
 
 	} while (!get_stack_info(state->sp, state->task, info,
 				 &state->stack_mask));
@@ -49,7 +49,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	memset(state, 0, sizeof(*state));
 	state->task = task;
-	state->sp   = first_frame;
+	state->sp   = PTR_ALIGN(first_frame, sizeof(long));
 
 	get_stack_info(first_frame, state->task, &state->stack_info,
 		       &state->stack_mask);
...
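PTR_ALIGN() (from linux/kernel.h) rounds a pointer up to a power-of-two boundary; the unwinder now forces a possibly-unaligned next_sp onto a word boundary before feeding it back into get_stack_info(). A minimal illustration, assuming a 64-bit kernel:

    unsigned long *sp = (unsigned long *)0x100c;	/* unaligned stack address */
    sp = PTR_ALIGN(sp, sizeof(long));			/* -> 0x1010 */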
@@ -146,6 +146,7 @@ SECTIONS
 		_edata = .;
 	} :data
 
+	BUG_TABLE
 
 	. = ALIGN(PAGE_SIZE);
 	__vvar_page = .;
...
@@ -14,20 +14,15 @@
  * Zero a page.
  * %rdi	- page
  */
-ENTRY(clear_page)
-
-	ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
-		      "jmp clear_page_c_e", X86_FEATURE_ERMS
-
+ENTRY(clear_page_rep)
 	movl $4096/8,%ecx
 	xorl %eax,%eax
 	rep stosq
 	ret
-ENDPROC(clear_page)
-EXPORT_SYMBOL(clear_page)
+ENDPROC(clear_page_rep)
+EXPORT_SYMBOL_GPL(clear_page_rep)
 
 ENTRY(clear_page_orig)
-
 	xorl   %eax,%eax
 	movl   $4096/64,%ecx
 	.p2align 4
@@ -47,10 +42,12 @@ ENTRY(clear_page_orig)
 	nop
 	ret
 ENDPROC(clear_page_orig)
+EXPORT_SYMBOL_GPL(clear_page_orig)
 
-ENTRY(clear_page_c_e)
+ENTRY(clear_page_erms)
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
 	ret
-ENDPROC(clear_page_c_e)
+ENDPROC(clear_page_erms)
+EXPORT_SYMBOL_GPL(clear_page_erms)
@@ -8,7 +8,7 @@ else
 	BITS := 64
 endif
 
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
+obj-y = bugs_$(BITS).o delay.o fault.o ldt.o \
 	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
 	stub_$(BITS).o stub_segv.o \
 	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
...
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-
-/*
- * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
-	unsigned short ud2;
-
-	if (probe_kernel_address((unsigned short __user *)eip, ud2))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
@@ -5,7 +5,9 @@
 
 #ifdef CONFIG_GENERIC_BUG
 #define BUGFLAG_WARNING		(1 << 0)
-#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
+#define BUGFLAG_ONCE		(1 << 1)
+#define BUGFLAG_DONE		(1 << 2)
+#define BUGFLAG_TAINT(taint)	((taint) << 8)
 #define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
 #endif
 
@@ -55,6 +57,18 @@ struct bug_entry {
 #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
+#ifdef __WARN_FLAGS
+#define __WARN_TAINT(taint)		__WARN_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_ONCE_TAINT(taint)	__WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
+
+#define WARN_ON_ONCE(condition) ({				\
+	int __ret_warn_on = !!(condition);			\
+	if (unlikely(__ret_warn_on))				\
+		__WARN_ONCE_TAINT(TAINT_WARN);			\
+	unlikely(__ret_warn_on);				\
+})
+#endif
+
 /*
  * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
  * significant issues that need prompt attention if they should ever
@@ -112,6 +126,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	unlikely(__ret_warn_on);				\
 })
 
+#ifndef WARN_ON_ONCE
 #define WARN_ON_ONCE(condition)	({				\
 	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
@@ -122,6 +137,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	}							\
 	unlikely(__ret_warn_once);				\
 })
+#endif
 
 #define WARN_ONCE(condition, format...)	({			\
 	static bool __section(.data.unlikely) __warned;		\
...
@@ -287,8 +287,6 @@
 		*(.rodata1)						\
 	}								\
 									\
-	BUG_TABLE							\
-									\
 	/* PCI quirks */						\
 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
@@ -857,7 +855,8 @@
 		READ_MOSTLY_DATA(cacheline)				\
 		DATA_DATA						\
 		CONSTRUCTORS						\
-	}
+	}								\
+	BUG_TABLE
 
 #define INIT_TEXT_SECTION(inittext_align)				\
 	. = ALIGN(inittext_align);					\
...
@@ -105,7 +105,7 @@ static inline int is_warning_bug(const struct bug_entry *bug)
 	return bug->flags & BUGFLAG_WARNING;
 }
 
-const struct bug_entry *find_bug(unsigned long bugaddr);
+struct bug_entry *find_bug(unsigned long bugaddr);
 
 enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
...
@@ -47,7 +47,7 @@
 #include <linux/sched.h>
 #include <linux/rculist.h>
 
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 
 static inline unsigned long bug_addr(const struct bug_entry *bug)
 {
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 /* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
-	const struct bug_entry *bug = NULL;
+	struct bug_entry *bug = NULL;
 
 	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
 
 #else
 
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	return NULL;
 }
 #endif
 
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 		if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 
 enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 	const char *file;
-	unsigned line, warning;
+	unsigned line, warning, once, done;
 
 	if (!is_valid_bugaddr(bugaddr))
 		return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+		once = (bug->flags & BUGFLAG_ONCE) != 0;
+		done = (bug->flags & BUGFLAG_DONE) != 0;
+
+		if (warning && once) {
+			if (done)
+				return BUG_TRAP_TYPE_WARN;
+
+			/*
+			 * Since this is the only store, concurrency is not an issue.
+			 */
+			bug->flags |= BUGFLAG_DONE;
+		}
 	}
 
 	if (warning) {
...
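The dropped const qualifiers throughout this file follow from the hunk above: report_bug() now stores BUGFLAG_DONE back into the bug_entry, so a BUGFLAG_ONCE warning fires exactly once without the per-site static bool that the generic WARN_ON_ONCE() needs. The resulting flags word, per the asm-generic/bug.h hunk earlier in this diff:

    /* bug_entry::flags:
     *   bit 0      BUGFLAG_WARNING  - warn, do not oops
     *   bit 1      BUGFLAG_ONCE     - report only the first hit
     *   bit 2      BUGFLAG_DONE     - set by report_bug() after that first hit
     *   bits 8-15  BUGFLAG_TAINT()  - taint to apply when the entry fires
     */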