Commit bf603339 authored by Will Deacon

Merge branch 'x86/asm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into for-next/asm

As agreed with Boris, merge in the 'x86/asm' branch from -tip so that we
can select the new 'ARCH_USE_SYM_ANNOTATIONS' Kconfig symbol, which is
required by the BTI kernel patches.

* 'x86/asm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm: Provide a Kconfig symbol for disabling old assembly annotations
  x86/32: Remove CONFIG_DOUBLEFAULT
parents 6a8b55ed 2ce0d7f9
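
For context, the deprecated annotations hidden behind the new ARCH_USE_SYM_ANNOTATIONS symbol are the old-style ENTRY()/ENDPROC() macros, which the include/linux/linkage.h hunk below simply rewires to SYM_FUNC_START()/SYM_FUNC_END(). A minimal before/after sketch (the symbol name my_func is illustrative only):

    /* Deprecated style, only defined while ARCH_USE_SYM_ANNOTATIONS is not selected */
    ENTRY(my_func)
    	ret
    ENDPROC(my_func)

    /* Preferred replacement, the only style left once an architecture selects ARCH_USE_SYM_ANNOTATIONS */
    SYM_FUNC_START(my_func)
    	ret
    SYM_FUNC_END(my_func)
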
@@ -91,6 +91,7 @@ config X86
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_USE_SYM_ANNOTATIONS
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT

@@ -99,15 +99,6 @@ config DEBUG_WX
 	  If in doubt, say "Y".
 
-config DOUBLEFAULT
-	default y
-	bool "Enable doublefault exception handler" if EXPERT && X86_32
-	---help---
-	  This option allows trapping of rare doublefault exceptions that
-	  would otherwise cause a system to silently reboot. Disabling this
-	  option saves about 4k and might cause you much additional grey
-	  hair.
-
 config DEBUG_TLBFLUSH
 	bool "Set upper limit of TLB entries to flush one-by-one"
 	depends on DEBUG_KERNEL

@@ -1536,7 +1536,6 @@ SYM_CODE_START(debug)
 	jmp	common_exception
 SYM_CODE_END(debug)
 
-#ifdef CONFIG_DOUBLEFAULT
 SYM_CODE_START(double_fault)
 1:
 	/*
@@ -1576,7 +1575,6 @@ SYM_CODE_START(double_fault)
 	hlt
 	jmp	1b
 SYM_CODE_END(double_fault)
-#endif
 
 /*
  * NMI is doubly nasty. It can happen on the first instruction of

@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_DOUBLEFAULT_H
 #define _ASM_X86_DOUBLEFAULT_H
 
-#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+#ifdef CONFIG_X86_32
 extern void doublefault_init_cpu_tss(void);
 #else
 static inline void doublefault_init_cpu_tss(void)

@@ -69,9 +69,7 @@ dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
 dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
-#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
-#endif
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
 dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);

@@ -102,9 +102,7 @@ obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
 obj-y += kprobes/
 obj-$(CONFIG_MODULES) += module.o
-ifeq ($(CONFIG_X86_32),y)
-obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
-endif
+obj-$(CONFIG_X86_32) += doublefault_32.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_VM86) += vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o

@@ -87,7 +87,6 @@ static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
 {
-#ifdef CONFIG_DOUBLEFAULT
 	struct cpu_entry_area *cea = get_cpu_entry_area(raw_smp_processor_id());
 	struct doublefault_stack *ss = &cea->doublefault_stack;
@@ -103,9 +102,6 @@ static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
 	info->next_sp = (unsigned long *)this_cpu_read(cpu_tss_rw.x86_tss.sp);
 	return true;
-#else
-	return false;
-#endif
 }

@@ -326,7 +326,6 @@ __visible void __noreturn handle_stack_overflow(const char *message,
 }
 #endif
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
 /*
  * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
  *
@@ -450,7 +449,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
 	die("double fault", regs, error_code);
 	panic("Machine halted.");
 }
-#endif
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {

@@ -17,7 +17,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 #endif
 
-#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+#ifdef CONFIG_X86_32
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
 #endif
@@ -114,12 +114,10 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 #else
 static inline void percpu_setup_exception_stacks(unsigned int cpu)
 {
-#ifdef CONFIG_DOUBLEFAULT
 	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
 
 	cea_map_percpu_pages(&cea->doublefault_stack,
 			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
-#endif
 }
 #endif

@@ -105,7 +105,7 @@
 /* === DEPRECATED annotations === */
 
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
 #ifndef GLOBAL
 /* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
 #define GLOBAL(name) \
@@ -118,10 +118,10 @@
 #define ENTRY(name) \
 	SYM_FUNC_START(name)
 #endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
 #endif /* LINKER_SCRIPT */
 
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
 #ifndef WEAK
 /* deprecated, use SYM_FUNC_START_WEAK* */
 #define WEAK(name) \
@@ -143,7 +143,7 @@
 #define ENDPROC(name) \
 	SYM_FUNC_END(name)
 #endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
 
 /* === generic annotations === */

@@ -80,6 +80,9 @@ config ARCH_USE_CMPXCHG_LOCKREF
 config ARCH_HAS_FAST_MULTIPLIER
 	bool
 
+config ARCH_USE_SYM_ANNOTATIONS
+	bool
+
 config INDIRECT_PIO
 	bool "Access I/O in non-MMIO mode"
 	depends on ARM64

@@ -58,7 +58,6 @@ CONFIG_RCU_EQS_DEBUG=y
 CONFIG_USER_STACKTRACE_SUPPORT=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DOUBLEFAULT=y
 CONFIG_X86_DEBUG_FPU=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_PAGEALLOC=y

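With ARCH_USE_SYM_ANNOTATIONS now defined in the generic arch Kconfig and selected by x86 above, the arm64 BTI series mentioned in the merge message can select it as well; a hypothetical sketch of that follow-up change (not part of this merge):

    # arch/arm64/Kconfig (illustrative only)
    config ARM64
    	select ARCH_USE_SYM_ANNOTATIONS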