Commit 2aff7c70 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull objtool updates from Ingo Molnar:

 - Mark arch_cpu_idle_dead() __noreturn, make all architectures &
   drivers that did this inconsistently follow this new, common
   convention, and fix all the fallout that objtool can now detect
   statically

 - Fix/improve the ORC unwinder becoming unreliable due to
   UNWIND_HINT_EMPTY ambiguity, split it into UNWIND_HINT_END_OF_STACK
   and UNWIND_HINT_UNDEFINED to resolve it

 - Fix noinstr violations in the KCSAN code and the lkdtm/stackleak code

 - Generate ORC data for __pfx code

 - Add more __noreturn annotations to various kernel startup/shutdown
   and panic functions

 - Misc improvements & fixes

* tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  x86/hyperv: Mark hv_ghcb_terminate() as noreturn
  scsi: message: fusion: Mark mpt_halt_firmware() __noreturn
  x86/cpu: Mark {hlt,resume}_play_dead() __noreturn
  btrfs: Mark btrfs_assertfail() __noreturn
  objtool: Include weak functions in global_noreturns check
  cpu: Mark nmi_panic_self_stop() __noreturn
  cpu: Mark panic_smp_self_stop() __noreturn
  arm64/cpu: Mark cpu_park_loop() and friends __noreturn
  x86/head: Mark *_start_kernel() __noreturn
  init: Mark start_kernel() __noreturn
  init: Mark [arch_call_]rest_init() __noreturn
  objtool: Generate ORC data for __pfx code
  x86/linkage: Fix padding for typed functions
  objtool: Separate prefix code from stack validation code
  objtool: Remove superfluous dead_end_function() check
  objtool: Add symbol iteration helpers
  objtool: Add WARN_INSN()
  scripts/objdump-func: Support multiple functions
  context_tracking: Fix KCSAN noinstr violation
  objtool: Add stackleak instrumentation to uaccess safe list
  ...
parents 22b8cc3e 611d4c71
...@@ -183,7 +183,7 @@ trampoline or return trampoline. For example, considering the x86_64 ...@@ -183,7 +183,7 @@ trampoline or return trampoline. For example, considering the x86_64
.. code-block:: none .. code-block:: none
SYM_CODE_START(return_to_handler) SYM_CODE_START(return_to_handler)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
subq $24, %rsp subq $24, %rsp
/* Save the return values */ /* Save the return values */
......
...@@ -15180,8 +15180,8 @@ OBJTOOL ...@@ -15180,8 +15180,8 @@ OBJTOOL
M: Josh Poimboeuf <jpoimboe@kernel.org> M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Peter Zijlstra <peterz@infradead.org> M: Peter Zijlstra <peterz@infradead.org>
S: Supported S: Supported
F: include/linux/objtool*.h
F: tools/objtool/ F: tools/objtool/
F: include/linux/objtool.h
OCELOT ETHERNET SWITCH DRIVER OCELOT ETHERNET SWITCH DRIVER
M: Vladimir Oltean <vladimir.oltean@nxp.com> M: Vladimir Oltean <vladimir.oltean@nxp.com>
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* This file handles the architecture-dependent parts of process handling. * This file handles the architecture-dependent parts of process handling.
*/ */
#include <linux/cpu.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -59,9 +60,10 @@ void arch_cpu_idle(void) ...@@ -59,9 +60,10 @@ void arch_cpu_idle(void)
wtint(0); wtint(0);
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
wtint(INT_MAX); wtint(INT_MAX);
BUG();
} }
#endif /* ALPHA_WTINT */ #endif /* ALPHA_WTINT */
......
...@@ -320,7 +320,7 @@ void __cpu_die(unsigned int cpu) ...@@ -320,7 +320,7 @@ void __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming * of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this. * out of idle fixes this.
*/ */
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
...@@ -382,6 +382,8 @@ void arch_cpu_idle_dead(void) ...@@ -382,6 +382,8 @@ void arch_cpu_idle_dead(void)
: "r" (task_stack_page(current) + THREAD_SIZE - 8), : "r" (task_stack_page(current) + THREAD_SIZE - 8),
"r" (current) "r" (current)
: "r0"); : "r0");
unreachable();
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
...@@ -777,7 +779,7 @@ void smp_send_stop(void) ...@@ -777,7 +779,7 @@ void smp_send_stop(void)
* kdump fails. So split out the panic_smp_self_stop() and add * kdump fails. So split out the panic_smp_self_stop() and add
* set_cpu_online(smp_processor_id(), false). * set_cpu_online(smp_processor_id(), false).
*/ */
void panic_smp_self_stop(void) void __noreturn panic_smp_self_stop(void)
{ {
pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n", pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
smp_processor_id()); smp_processor_id());
......
...@@ -31,7 +31,7 @@ static inline unsigned long disr_to_esr(u64 disr) ...@@ -31,7 +31,7 @@ static inline unsigned long disr_to_esr(u64 disr)
return esr; return esr;
} }
asmlinkage void handle_bad_stack(struct pt_regs *regs); asmlinkage void __noreturn handle_bad_stack(struct pt_regs *regs);
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs); asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs); asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
...@@ -80,5 +80,5 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr); ...@@ -80,5 +80,5 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr); void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */ #endif /* __ASM_EXCEPTION_H */
...@@ -100,10 +100,10 @@ static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask) ...@@ -100,10 +100,10 @@ static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
extern int __cpu_disable(void); extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu); extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void); extern void __noreturn cpu_die(void);
extern void cpu_die_early(void); extern void __noreturn cpu_die_early(void);
static inline void cpu_park_loop(void) static inline void __noreturn cpu_park_loop(void)
{ {
for (;;) { for (;;) {
wfe(); wfe();
...@@ -123,7 +123,7 @@ static inline void update_cpu_boot_status(int val) ...@@ -123,7 +123,7 @@ static inline void update_cpu_boot_status(int val)
* which calls for a kernel panic. Update the boot status and park the calling * which calls for a kernel panic. Update the boot status and park the calling
* CPU. * CPU.
*/ */
static inline void cpu_panic_kernel(void) static inline void __noreturn cpu_panic_kernel(void)
{ {
update_cpu_boot_status(CPU_PANIC_KERNEL); update_cpu_boot_status(CPU_PANIC_KERNEL);
cpu_park_loop(); cpu_park_loop();
...@@ -143,7 +143,6 @@ bool cpus_are_stuck_in_kernel(void); ...@@ -143,7 +143,6 @@ bool cpus_are_stuck_in_kernel(void);
extern void crash_smp_send_stop(void); extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void); extern bool smp_crash_stop_failed(void);
extern void panic_smp_self_stop(void);
#endif /* ifndef __ASSEMBLY__ */ #endif /* ifndef __ASSEMBLY__ */
......
...@@ -840,7 +840,7 @@ UNHANDLED(el0t, 32, error) ...@@ -840,7 +840,7 @@ UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
#ifdef CONFIG_VMAP_STACK #ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{ {
unsigned long esr = read_sysreg(esr_el1); unsigned long esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
......
...@@ -69,7 +69,7 @@ void (*pm_power_off)(void); ...@@ -69,7 +69,7 @@ void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off); EXPORT_SYMBOL_GPL(pm_power_off);
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
cpu_die(); cpu_die();
} }
......
...@@ -361,7 +361,7 @@ void __cpu_die(unsigned int cpu) ...@@ -361,7 +361,7 @@ void __cpu_die(unsigned int cpu)
* Called from the idle thread for the CPU which has been shutdown. * Called from the idle thread for the CPU which has been shutdown.
* *
*/ */
void cpu_die(void) void __noreturn cpu_die(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
const struct cpu_operations *ops = get_cpu_ops(cpu); const struct cpu_operations *ops = get_cpu_ops(cpu);
...@@ -398,7 +398,7 @@ static void __cpu_try_die(int cpu) ...@@ -398,7 +398,7 @@ static void __cpu_try_die(int cpu)
* Kill the calling secondary CPU, early in bringup before it is turned * Kill the calling secondary CPU, early in bringup before it is turned
* online. * online.
*/ */
void cpu_die_early(void) void __noreturn cpu_die_early(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
...@@ -816,7 +816,7 @@ void arch_irq_work_raise(void) ...@@ -816,7 +816,7 @@ void arch_irq_work_raise(void)
} }
#endif #endif
static void local_cpu_stop(void) static void __noreturn local_cpu_stop(void)
{ {
set_cpu_online(smp_processor_id(), false); set_cpu_online(smp_processor_id(), false);
...@@ -830,7 +830,7 @@ static void local_cpu_stop(void) ...@@ -830,7 +830,7 @@ static void local_cpu_stop(void)
* that cpu_online_mask gets correctly updated and smp_send_stop() can skip * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
* CPUs that have already stopped themselves. * CPUs that have already stopped themselves.
*/ */
void panic_smp_self_stop(void) void __noreturn panic_smp_self_stop(void)
{ {
local_cpu_stop(); local_cpu_stop();
} }
...@@ -839,7 +839,7 @@ void panic_smp_self_stop(void) ...@@ -839,7 +839,7 @@ void panic_smp_self_stop(void)
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0); static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif #endif
static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{ {
#ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_KEXEC_CORE
crash_save_cpu(regs, cpu); crash_save_cpu(regs, cpu);
...@@ -854,6 +854,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) ...@@ -854,6 +854,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
/* just in case */ /* just in case */
cpu_park_loop(); cpu_park_loop();
#else
BUG();
#endif #endif
} }
......
...@@ -863,7 +863,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) ...@@ -863,7 +863,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16); __aligned(16);
void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{ {
unsigned long tsk_stk = (unsigned long)current->stack; unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
...@@ -905,7 +905,6 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) ...@@ -905,7 +905,6 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
nmi_panic(regs, "Asynchronous SError Interrupt"); nmi_panic(regs, "Asynchronous SError Interrupt");
cpu_park_loop(); cpu_park_loop();
unreachable();
} }
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr) bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
......
...@@ -300,7 +300,7 @@ void __cpu_die(unsigned int cpu) ...@@ -300,7 +300,7 @@ void __cpu_die(unsigned int cpu)
pr_notice("CPU%u: shutdown\n", cpu); pr_notice("CPU%u: shutdown\n", cpu);
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
idle_task_exit(); idle_task_exit();
...@@ -317,5 +317,7 @@ void arch_cpu_idle_dead(void) ...@@ -317,5 +317,7 @@ void arch_cpu_idle_dead(void)
"jmpi csky_start_secondary" "jmpi csky_start_secondary"
: :
: "r" (secondary_stack)); : "r" (secondary_stack));
BUG();
} }
#endif #endif
...@@ -201,7 +201,7 @@ __setup("nohalt", nohalt_setup); ...@@ -201,7 +201,7 @@ __setup("nohalt", nohalt_setup);
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */ /* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void) static inline void __noreturn play_dead(void)
{ {
unsigned int this_cpu = smp_processor_id(); unsigned int this_cpu = smp_processor_id();
...@@ -219,13 +219,13 @@ static inline void play_dead(void) ...@@ -219,13 +219,13 @@ static inline void play_dead(void)
BUG(); BUG();
} }
#else #else
static inline void play_dead(void) static inline void __noreturn play_dead(void)
{ {
BUG(); BUG();
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
play_dead(); play_dead();
} }
......
...@@ -99,7 +99,7 @@ static inline void __cpu_die(unsigned int cpu) ...@@ -99,7 +99,7 @@ static inline void __cpu_die(unsigned int cpu)
loongson_cpu_die(cpu); loongson_cpu_die(cpu);
} }
extern void play_dead(void); extern void __noreturn play_dead(void);
#endif #endif
#endif /* __ASM_SMP_H */ #endif /* __ASM_SMP_H */
...@@ -62,7 +62,7 @@ unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; ...@@ -62,7 +62,7 @@ unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override); EXPORT_SYMBOL(boot_option_idle_override);
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
play_dead(); play_dead();
} }
......
...@@ -336,7 +336,7 @@ void play_dead(void) ...@@ -336,7 +336,7 @@ void play_dead(void)
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR); iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
init_fn(); init_fn();
unreachable(); BUG();
} }
#endif #endif
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/smp.h>
#include <asm/octeon/octeon.h> #include <asm/octeon/octeon.h>
......
...@@ -88,7 +88,7 @@ static inline void __cpu_die(unsigned int cpu) ...@@ -88,7 +88,7 @@ static inline void __cpu_die(unsigned int cpu)
mp_ops->cpu_die(cpu); mp_ops->cpu_die(cpu);
} }
extern void play_dead(void); extern void __noreturn play_dead(void);
#endif #endif
#ifdef CONFIG_KEXEC #ifdef CONFIG_KEXEC
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
play_dead(); play_dead();
} }
......
...@@ -54,6 +54,8 @@ static void bmips_set_reset_vec(int cpu, u32 val); ...@@ -54,6 +54,8 @@ static void bmips_set_reset_vec(int cpu, u32 val);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#include <asm/smp.h>
/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ /* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
unsigned long bmips_smp_boot_sp; unsigned long bmips_smp_boot_sp;
unsigned long bmips_smp_boot_gp; unsigned long bmips_smp_boot_gp;
...@@ -413,6 +415,8 @@ void __ref play_dead(void) ...@@ -413,6 +415,8 @@ void __ref play_dead(void)
" wait\n" " wait\n"
" j bmips_secondary_reentry\n" " j bmips_secondary_reentry\n"
: : : "memory"); : : : "memory");
BUG();
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
#include <asm/pm-cps.h> #include <asm/pm-cps.h>
#include <asm/r4kcache.h> #include <asm/r4kcache.h>
#include <asm/smp.h>
#include <asm/smp-cps.h> #include <asm/smp-cps.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/uasm.h> #include <asm/uasm.h>
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/kexec.h> #include <linux/kexec.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/smp.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -808,6 +809,7 @@ void play_dead(void) ...@@ -808,6 +809,7 @@ void play_dead(void)
state_addr = &per_cpu(cpu_state, cpu); state_addr = &per_cpu(cpu_state, cpu);
mb(); mb();
play_dead_at_ckseg1(state_addr); play_dead_at_ckseg1(state_addr);
BUG();
} }
static int loongson3_disable_clock(unsigned int cpu) static int loongson3_disable_clock(unsigned int cpu)
......
...@@ -159,7 +159,7 @@ EXPORT_SYMBOL(running_on_qemu); ...@@ -159,7 +159,7 @@ EXPORT_SYMBOL(running_on_qemu);
/* /*
* Called from the idle thread for the CPU which has been shutdown. * Called from the idle thread for the CPU which has been shutdown.
*/ */
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
idle_task_exit(); idle_task_exit();
......
...@@ -67,7 +67,7 @@ void start_secondary(void *unused); ...@@ -67,7 +67,7 @@ void start_secondary(void *unused);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void); extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void); extern void __noreturn start_secondary_resume(void);
extern void smp_generic_give_timebase(void); extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void); extern void smp_generic_take_timebase(void);
......
...@@ -480,7 +480,7 @@ void early_setup_secondary(void) ...@@ -480,7 +480,7 @@ void early_setup_secondary(void)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
void panic_smp_self_stop(void) void __noreturn panic_smp_self_stop(void)
{ {
hard_irq_disable(); hard_irq_disable();
spin_begin(); spin_begin();
......
...@@ -1752,7 +1752,7 @@ void __cpu_die(unsigned int cpu) ...@@ -1752,7 +1752,7 @@ void __cpu_die(unsigned int cpu)
smp_ops->cpu_die(cpu); smp_ops->cpu_die(cpu);
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
/* /*
* Disable on the down path. This will be re-enabled by * Disable on the down path. This will be re-enabled by
......
...@@ -72,7 +72,7 @@ void __cpu_die(unsigned int cpu) ...@@ -72,7 +72,7 @@ void __cpu_die(unsigned int cpu)
/* /*
* Called from the idle thread for the CPU which has been shutdown. * Called from the idle thread for the CPU which has been shutdown.
*/ */
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
idle_task_exit(); idle_task_exit();
......
...@@ -88,7 +88,7 @@ void arch_cpu_idle_exit(void) ...@@ -88,7 +88,7 @@ void arch_cpu_idle_exit(void)
{ {
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
cpu_die(); cpu_die();
} }
...@@ -396,7 +396,7 @@ int __init arch_early_irq_init(void) ...@@ -396,7 +396,7 @@ int __init arch_early_irq_init(void)
return 0; return 0;
} }
void __init arch_call_rest_init(void) void __init __noreturn arch_call_rest_init(void)
{ {
unsigned long stack; unsigned long stack;
......
...@@ -24,9 +24,10 @@ static inline void plat_smp_setup(void) ...@@ -24,9 +24,10 @@ static inline void plat_smp_setup(void)
mp_ops->smp_setup(); mp_ops->smp_setup();
} }
static inline void play_dead(void) static inline void __noreturn play_dead(void)
{ {
mp_ops->play_dead(); mp_ops->play_dead();
BUG();
} }
extern void register_smp_ops(struct plat_smp_ops *ops); extern void register_smp_ops(struct plat_smp_ops *ops);
...@@ -42,7 +43,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) ...@@ -42,7 +43,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
{ {
} }
static inline void play_dead(void) static inline void __noreturn play_dead(void)
{ {
BUG(); BUG();
} }
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
* *
* Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2002 - 2009 Paul Mundt
*/ */
#include <linux/cpu.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -29,7 +30,7 @@ void default_idle(void) ...@@ -29,7 +30,7 @@ void default_idle(void)
clear_bl_bit(); clear_bl_bit();
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
play_dead(); play_dead();
} }
......
...@@ -49,7 +49,7 @@ int hard_smp_processor_id(void); ...@@ -49,7 +49,7 @@ int hard_smp_processor_id(void);
void smp_fill_in_cpu_possible_map(void); void smp_fill_in_cpu_possible_map(void);
void smp_fill_in_sib_core_maps(void); void smp_fill_in_sib_core_maps(void);
void cpu_play_dead(void); void __noreturn cpu_play_dead(void);
void smp_fetch_global_regs(void); void smp_fetch_global_regs(void);
void smp_fetch_global_pmu(void); void smp_fetch_global_pmu(void);
......
...@@ -95,7 +95,7 @@ void arch_cpu_idle(void) ...@@ -95,7 +95,7 @@ void arch_cpu_idle(void)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
sched_preempt_enable_no_resched(); sched_preempt_enable_no_resched();
cpu_play_dead(); cpu_play_dead();
......
...@@ -205,7 +205,7 @@ syscall_return_via_sysret: ...@@ -205,7 +205,7 @@ syscall_return_via_sysret:
*/ */
movq %rsp, %rdi movq %rsp, %rdi
movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
pushq RSP-RDI(%rdi) /* RSP */ pushq RSP-RDI(%rdi) /* RSP */
pushq (%rdi) /* RDI */ pushq (%rdi) /* RDI */
...@@ -286,7 +286,7 @@ SYM_FUNC_END(__switch_to_asm) ...@@ -286,7 +286,7 @@ SYM_FUNC_END(__switch_to_asm)
.pushsection .text, "ax" .pushsection .text, "ax"
__FUNC_ALIGN __FUNC_ALIGN
SYM_CODE_START_NOALIGN(ret_from_fork) SYM_CODE_START_NOALIGN(ret_from_fork)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR // copy_thread ANNOTATE_NOENDBR // copy_thread
CALL_DEPTH_ACCOUNT CALL_DEPTH_ACCOUNT
movq %rax, %rdi movq %rax, %rdi
...@@ -303,7 +303,7 @@ SYM_CODE_START_NOALIGN(ret_from_fork) ...@@ -303,7 +303,7 @@ SYM_CODE_START_NOALIGN(ret_from_fork)
1: 1:
/* kernel thread */ /* kernel thread */
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
movq %r12, %rdi movq %r12, %rdi
CALL_NOSPEC rbx CALL_NOSPEC rbx
/* /*
...@@ -388,9 +388,9 @@ SYM_CODE_START(\asmsym) ...@@ -388,9 +388,9 @@ SYM_CODE_START(\asmsym)
.if \vector == X86_TRAP_BP .if \vector == X86_TRAP_BP
/* #BP advances %rip to the next instruction */ /* #BP advances %rip to the next instruction */
UNWIND_HINT_IRET_REGS offset=\has_error_code*8 signal=0 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
.else .else
UNWIND_HINT_IRET_REGS offset=\has_error_code*8 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
.endif .endif
ENDBR ENDBR
...@@ -461,7 +461,7 @@ SYM_CODE_END(\asmsym) ...@@ -461,7 +461,7 @@ SYM_CODE_END(\asmsym)
*/ */
.macro idtentry_mce_db vector asmsym cfunc .macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym) SYM_CODE_START(\asmsym)
UNWIND_HINT_IRET_REGS UNWIND_HINT_IRET_ENTRY
ENDBR ENDBR
ASM_CLAC ASM_CLAC
cld cld
...@@ -518,7 +518,7 @@ SYM_CODE_END(\asmsym) ...@@ -518,7 +518,7 @@ SYM_CODE_END(\asmsym)
*/ */
.macro idtentry_vc vector asmsym cfunc .macro idtentry_vc vector asmsym cfunc
SYM_CODE_START(\asmsym) SYM_CODE_START(\asmsym)
UNWIND_HINT_IRET_REGS UNWIND_HINT_IRET_ENTRY
ENDBR ENDBR
ASM_CLAC ASM_CLAC
cld cld
...@@ -582,7 +582,7 @@ SYM_CODE_END(\asmsym) ...@@ -582,7 +582,7 @@ SYM_CODE_END(\asmsym)
*/ */
.macro idtentry_df vector asmsym cfunc .macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym) SYM_CODE_START(\asmsym)
UNWIND_HINT_IRET_REGS offset=8 UNWIND_HINT_IRET_ENTRY offset=8
ENDBR ENDBR
ASM_CLAC ASM_CLAC
cld cld
...@@ -643,7 +643,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) ...@@ -643,7 +643,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
*/ */
movq %rsp, %rdi movq %rsp, %rdi
movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
/* Copy the IRET frame to the trampoline stack. */ /* Copy the IRET frame to the trampoline stack. */
pushq 6*8(%rdi) /* SS */ pushq 6*8(%rdi) /* SS */
...@@ -869,7 +869,7 @@ SYM_CODE_END(exc_xen_hypervisor_callback) ...@@ -869,7 +869,7 @@ SYM_CODE_END(exc_xen_hypervisor_callback)
*/ */
__FUNC_ALIGN __FUNC_ALIGN
SYM_CODE_START_NOALIGN(xen_failsafe_callback) SYM_CODE_START_NOALIGN(xen_failsafe_callback)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ENDBR ENDBR
movl %ds, %ecx movl %ds, %ecx
cmpw %cx, 0x10(%rsp) cmpw %cx, 0x10(%rsp)
...@@ -1107,7 +1107,7 @@ SYM_CODE_START(error_entry) ...@@ -1107,7 +1107,7 @@ SYM_CODE_START(error_entry)
FENCE_SWAPGS_KERNEL_ENTRY FENCE_SWAPGS_KERNEL_ENTRY
CALL_DEPTH_ACCOUNT CALL_DEPTH_ACCOUNT
leaq 8(%rsp), %rax /* return pt_regs pointer */ leaq 8(%rsp), %rax /* return pt_regs pointer */
ANNOTATE_UNRET_END VALIDATE_UNRET_END
RET RET
.Lbstep_iret: .Lbstep_iret:
...@@ -1153,7 +1153,7 @@ SYM_CODE_END(error_return) ...@@ -1153,7 +1153,7 @@ SYM_CODE_END(error_return)
* when PAGE_TABLE_ISOLATION is in use. Do not clobber. * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
*/ */
SYM_CODE_START(asm_exc_nmi) SYM_CODE_START(asm_exc_nmi)
UNWIND_HINT_IRET_REGS UNWIND_HINT_IRET_ENTRY
ENDBR ENDBR
/* /*
...@@ -1520,7 +1520,7 @@ SYM_CODE_END(asm_exc_nmi) ...@@ -1520,7 +1520,7 @@ SYM_CODE_END(asm_exc_nmi)
* MSRs to fully disable 32-bit SYSCALL. * MSRs to fully disable 32-bit SYSCALL.
*/ */
SYM_CODE_START(ignore_sysret) SYM_CODE_START(ignore_sysret)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ENDBR ENDBR
mov $-ENOSYS, %eax mov $-ENOSYS, %eax
sysretl sysretl
......
...@@ -129,7 +129,7 @@ static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code, ...@@ -129,7 +129,7 @@ static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
return ES_OK; return ES_OK;
} }
void hv_ghcb_terminate(unsigned int set, unsigned int reason) void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{ {
u64 val = GHCB_MSR_TERM_REQ; u64 val = GHCB_MSR_TERM_REQ;
......
...@@ -99,7 +99,7 @@ ...@@ -99,7 +99,7 @@
/* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */ /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
#define SYM_TYPED_FUNC_START(name) \ #define SYM_TYPED_FUNC_START(name) \
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \
ENDBR ENDBR
/* SYM_FUNC_START -- use for global functions */ /* SYM_FUNC_START -- use for global functions */
......
...@@ -228,7 +228,7 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry); ...@@ -228,7 +228,7 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
void hv_ghcb_msr_write(u64 msr, u64 value); void hv_ghcb_msr_write(u64 msr, u64 value);
void hv_ghcb_msr_read(u64 msr, u64 *value); void hv_ghcb_msr_read(u64 msr, u64 *value);
bool hv_ghcb_negotiate_protocol(void); bool hv_ghcb_negotiate_protocol(void);
void hv_ghcb_terminate(unsigned int set, unsigned int reason); void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
void hv_vtom_init(void); void hv_vtom_init(void);
#else #else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {} static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
......
...@@ -194,9 +194,9 @@ ...@@ -194,9 +194,9 @@
* builds. * builds.
*/ */
.macro ANNOTATE_RETPOLINE_SAFE .macro ANNOTATE_RETPOLINE_SAFE
.Lannotate_\@: .Lhere_\@:
.pushsection .discard.retpoline_safe .pushsection .discard.retpoline_safe
_ASM_PTR .Lannotate_\@ .long .Lhere_\@ - .
.popsection .popsection
.endm .endm
...@@ -210,8 +210,8 @@ ...@@ -210,8 +210,8 @@
* Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
* eventually turn into it's own annotation. * eventually turn into it's own annotation.
*/ */
.macro ANNOTATE_UNRET_END .macro VALIDATE_UNRET_END
#ifdef CONFIG_DEBUG_ENTRY #if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
ANNOTATE_RETPOLINE_SAFE ANNOTATE_RETPOLINE_SAFE
nop nop
#endif #endif
...@@ -286,7 +286,7 @@ ...@@ -286,7 +286,7 @@
.macro UNTRAIN_RET .macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
defined(CONFIG_CALL_DEPTH_TRACKING) defined(CONFIG_CALL_DEPTH_TRACKING)
ANNOTATE_UNRET_END VALIDATE_UNRET_END
ALTERNATIVE_3 "", \ ALTERNATIVE_3 "", \
CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
...@@ -297,7 +297,7 @@ ...@@ -297,7 +297,7 @@
.macro UNTRAIN_RET_FROM_CALL .macro UNTRAIN_RET_FROM_CALL
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
defined(CONFIG_CALL_DEPTH_TRACKING) defined(CONFIG_CALL_DEPTH_TRACKING)
ANNOTATE_UNRET_END VALIDATE_UNRET_END
ALTERNATIVE_3 "", \ ALTERNATIVE_3 "", \
CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
...@@ -318,7 +318,7 @@ ...@@ -318,7 +318,7 @@
#define ANNOTATE_RETPOLINE_SAFE \ #define ANNOTATE_RETPOLINE_SAFE \
"999:\n\t" \ "999:\n\t" \
".pushsection .discard.retpoline_safe\n\t" \ ".pushsection .discard.retpoline_safe\n\t" \
_ASM_PTR " 999b\n\t" \ ".long 999b - .\n\t" \
".popsection\n\t" ".popsection\n\t"
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
......
...@@ -39,6 +39,12 @@ ...@@ -39,6 +39,12 @@
#define ORC_REG_SP_INDIRECT 9 #define ORC_REG_SP_INDIRECT 9
#define ORC_REG_MAX 15 #define ORC_REG_MAX 15
#define ORC_TYPE_UNDEFINED 0
#define ORC_TYPE_END_OF_STACK 1
#define ORC_TYPE_CALL 2
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/byteorder.h> #include <asm/byteorder.h>
...@@ -56,16 +62,14 @@ struct orc_entry { ...@@ -56,16 +62,14 @@ struct orc_entry {
#if defined(__LITTLE_ENDIAN_BITFIELD) #if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned sp_reg:4; unsigned sp_reg:4;
unsigned bp_reg:4; unsigned bp_reg:4;
unsigned type:2; unsigned type:3;
unsigned signal:1; unsigned signal:1;
unsigned end:1;
#elif defined(__BIG_ENDIAN_BITFIELD) #elif defined(__BIG_ENDIAN_BITFIELD)
unsigned bp_reg:4; unsigned bp_reg:4;
unsigned sp_reg:4; unsigned sp_reg:4;
unsigned unused:4; unsigned unused:4;
unsigned end:1;
unsigned signal:1; unsigned signal:1;
unsigned type:2; unsigned type:3;
#endif #endif
} __packed; } __packed;
......
...@@ -28,7 +28,6 @@ void __noreturn machine_real_restart(unsigned int type); ...@@ -28,7 +28,6 @@ void __noreturn machine_real_restart(unsigned int type);
void cpu_emergency_disable_virtualization(void); void cpu_emergency_disable_virtualization(void);
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_panic_self_stop(struct pt_regs *regs);
void nmi_shootdown_cpus(nmi_shootdown_cb callback); void nmi_shootdown_cpus(nmi_shootdown_cb callback);
void run_crash_ipi_callback(struct pt_regs *regs); void run_crash_ipi_callback(struct pt_regs *regs);
......
...@@ -125,11 +125,11 @@ void clear_bss(void); ...@@ -125,11 +125,11 @@ void clear_bss(void);
#ifdef __i386__ #ifdef __i386__
asmlinkage void __init i386_start_kernel(void); asmlinkage void __init __noreturn i386_start_kernel(void);
#else #else
asmlinkage void __init x86_64_start_kernel(char *real_mode); asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode);
asmlinkage void __init x86_64_start_reservations(char *real_mode_data); asmlinkage void __init __noreturn x86_64_start_reservations(char *real_mode_data);
#endif /* __i386__ */ #endif /* __i386__ */
#endif /* _SETUP */ #endif /* _SETUP */
......
...@@ -93,9 +93,10 @@ static inline void __cpu_die(unsigned int cpu) ...@@ -93,9 +93,10 @@ static inline void __cpu_die(unsigned int cpu)
smp_ops.cpu_die(cpu); smp_ops.cpu_die(cpu);
} }
static inline void play_dead(void) static inline void __noreturn play_dead(void)
{ {
smp_ops.play_dead(); smp_ops.play_dead();
BUG();
} }
static inline void smp_send_reschedule(int cpu) static inline void smp_send_reschedule(int cpu)
...@@ -124,7 +125,7 @@ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); ...@@ -124,7 +125,7 @@ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void); int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu); int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu); void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void); void __noreturn hlt_play_dead(void);
void native_play_dead(void); void native_play_dead(void);
void play_dead_common(void); void play_dead_common(void);
void wbinvd_on_cpu(int cpu); void wbinvd_on_cpu(int cpu);
......
...@@ -7,12 +7,17 @@ ...@@ -7,12 +7,17 @@
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
.macro UNWIND_HINT_EMPTY .macro UNWIND_HINT_END_OF_STACK
UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1 UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK
.endm
.macro UNWIND_HINT_UNDEFINED
UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
.endm .endm
.macro UNWIND_HINT_ENTRY .macro UNWIND_HINT_ENTRY
UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1 VALIDATE_UNRET_BEGIN
UNWIND_HINT_END_OF_STACK
.endm .endm
.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 signal=1 .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 signal=1
...@@ -52,6 +57,11 @@ ...@@ -52,6 +57,11 @@
UNWIND_HINT_REGS base=\base offset=\offset partial=1 signal=\signal UNWIND_HINT_REGS base=\base offset=\offset partial=1 signal=\signal
.endm .endm
.macro UNWIND_HINT_IRET_ENTRY base=%rsp offset=0 signal=1
VALIDATE_UNRET_BEGIN
UNWIND_HINT_IRET_REGS base=\base offset=\offset signal=\signal
.endm
.macro UNWIND_HINT_FUNC .macro UNWIND_HINT_FUNC
UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC
.endm .endm
...@@ -67,7 +77,7 @@ ...@@ -67,7 +77,7 @@
#else #else
#define UNWIND_HINT_FUNC \ #define UNWIND_HINT_FUNC \
UNWIND_HINT(ORC_REG_SP, 8, UNWIND_HINT_TYPE_FUNC, 0, 0) UNWIND_HINT(UNWIND_HINT_TYPE_FUNC, ORC_REG_SP, 8, 0)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -346,7 +346,7 @@ STACK_FRAME_NON_STANDARD_FP(__fentry__) ...@@ -346,7 +346,7 @@ STACK_FRAME_NON_STANDARD_FP(__fentry__)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(return_to_handler) SYM_CODE_START(return_to_handler)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
subq $16, %rsp subq $16, %rsp
......
...@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void) ...@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
} }
asmlinkage __visible void __init i386_start_kernel(void) asmlinkage __visible void __init __noreturn i386_start_kernel(void)
{ {
/* Make sure IDT is set up before any exception happens */ /* Make sure IDT is set up before any exception happens */
idt_setup_early_handler(); idt_setup_early_handler();
......
...@@ -471,7 +471,7 @@ static void __init copy_bootdata(char *real_mode_data) ...@@ -471,7 +471,7 @@ static void __init copy_bootdata(char *real_mode_data)
sme_unmap_bootdata(real_mode_data); sme_unmap_bootdata(real_mode_data);
} }
asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode_data)
{ {
/* /*
* Build-time sanity checks on the kernel image and module * Build-time sanity checks on the kernel image and module
...@@ -537,7 +537,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) ...@@ -537,7 +537,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
x86_64_start_reservations(real_mode_data); x86_64_start_reservations(real_mode_data);
} }
void __init x86_64_start_reservations(char *real_mode_data) void __init __noreturn x86_64_start_reservations(char *real_mode_data)
{ {
/* version is always not zero if it is copied */ /* version is always not zero if it is copied */
if (!boot_params.hdr.version) if (!boot_params.hdr.version)
......
...@@ -42,7 +42,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map) ...@@ -42,7 +42,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
__HEAD __HEAD
.code64 .code64
SYM_CODE_START_NOALIGN(startup_64) SYM_CODE_START_NOALIGN(startup_64)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
/* /*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded an identity mapped page table * and someone has loaded an identity mapped page table
...@@ -97,7 +97,7 @@ SYM_CODE_START_NOALIGN(startup_64) ...@@ -97,7 +97,7 @@ SYM_CODE_START_NOALIGN(startup_64)
lretq lretq
.Lon_kernel_cs: .Lon_kernel_cs:
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
/* Sanitize CPU configuration */ /* Sanitize CPU configuration */
call verify_cpu call verify_cpu
...@@ -119,7 +119,7 @@ SYM_CODE_START_NOALIGN(startup_64) ...@@ -119,7 +119,7 @@ SYM_CODE_START_NOALIGN(startup_64)
SYM_CODE_END(startup_64) SYM_CODE_END(startup_64)
SYM_CODE_START(secondary_startup_64) SYM_CODE_START(secondary_startup_64)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
/* /*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
...@@ -148,7 +148,7 @@ SYM_CODE_START(secondary_startup_64) ...@@ -148,7 +148,7 @@ SYM_CODE_START(secondary_startup_64)
* verify_cpu() above to make sure NX is enabled. * verify_cpu() above to make sure NX is enabled.
*/ */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
/* /*
...@@ -230,7 +230,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) ...@@ -230,7 +230,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
ANNOTATE_RETPOLINE_SAFE ANNOTATE_RETPOLINE_SAFE
jmp *%rax jmp *%rax
1: 1:
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR // above ANNOTATE_NOENDBR // above
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -383,7 +383,7 @@ SYM_CODE_END(secondary_startup_64) ...@@ -383,7 +383,7 @@ SYM_CODE_END(secondary_startup_64)
*/ */
SYM_CODE_START(start_cpu0) SYM_CODE_START(start_cpu0)
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
/* Find the idle task stack */ /* Find the idle task stack */
movq PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx movq PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
...@@ -406,8 +406,6 @@ SYM_CODE_START_NOALIGN(vc_boot_ghcb) ...@@ -406,8 +406,6 @@ SYM_CODE_START_NOALIGN(vc_boot_ghcb)
UNWIND_HINT_IRET_REGS offset=8 UNWIND_HINT_IRET_REGS offset=8
ENDBR ENDBR
ANNOTATE_UNRET_END
/* Build pt_regs */ /* Build pt_regs */
PUSH_AND_CLEAR_REGS PUSH_AND_CLEAR_REGS
...@@ -460,7 +458,6 @@ SYM_CODE_END(early_idt_handler_array) ...@@ -460,7 +458,6 @@ SYM_CODE_END(early_idt_handler_array)
SYM_CODE_START_LOCAL(early_idt_handler_common) SYM_CODE_START_LOCAL(early_idt_handler_common)
UNWIND_HINT_IRET_REGS offset=16 UNWIND_HINT_IRET_REGS offset=16
ANNOTATE_UNRET_END
/* /*
* The stack is the hardware frame, an error code or zero, and the * The stack is the hardware frame, an error code or zero, and the
* vector number. * vector number.
...@@ -510,8 +507,6 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb) ...@@ -510,8 +507,6 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
UNWIND_HINT_IRET_REGS offset=8 UNWIND_HINT_IRET_REGS offset=8
ENDBR ENDBR
ANNOTATE_UNRET_END
/* Build pt_regs */ /* Build pt_regs */
PUSH_AND_CLEAR_REGS PUSH_AND_CLEAR_REGS
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/prctl.h> #include <linux/prctl.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -721,7 +722,7 @@ static bool x86_idle_set(void) ...@@ -721,7 +722,7 @@ static bool x86_idle_set(void)
} }
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
static inline void play_dead(void) static inline void __noreturn play_dead(void)
{ {
BUG(); BUG();
} }
...@@ -733,7 +734,7 @@ void arch_cpu_idle_enter(void) ...@@ -733,7 +734,7 @@ void arch_cpu_idle_enter(void)
local_touch_nmi(); local_touch_nmi();
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
play_dead(); play_dead();
} }
......
...@@ -920,7 +920,7 @@ void run_crash_ipi_callback(struct pt_regs *regs) ...@@ -920,7 +920,7 @@ void run_crash_ipi_callback(struct pt_regs *regs)
} }
/* Override the weak function in kernel/panic.c */ /* Override the weak function in kernel/panic.c */
void nmi_panic_self_stop(struct pt_regs *regs) void __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{ {
while (1) { while (1) {
/* If no CPU is preparing crash dump, we simply loop here. */ /* If no CPU is preparing crash dump, we simply loop here. */
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
.code64 .code64
SYM_CODE_START_NOALIGN(relocate_range) SYM_CODE_START_NOALIGN(relocate_range)
SYM_CODE_START_NOALIGN(relocate_kernel) SYM_CODE_START_NOALIGN(relocate_kernel)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
/* /*
* %rdi indirection_page * %rdi indirection_page
...@@ -113,7 +113,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel) ...@@ -113,7 +113,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
SYM_CODE_END(relocate_kernel) SYM_CODE_END(relocate_kernel)
SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
/* set return address to 0 if not preserving context */ /* set return address to 0 if not preserving context */
pushq $0 pushq $0
/* store the start address on the stack */ /* store the start address on the stack */
...@@ -231,7 +231,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) ...@@ -231,7 +231,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
SYM_CODE_END(identity_mapped) SYM_CODE_END(identity_mapped)
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR // RET target, above ANNOTATE_NOENDBR // RET target, above
movq RSP(%r8), %rsp movq RSP(%r8), %rsp
movq CR4(%r8), %rax movq CR4(%r8), %rax
...@@ -256,7 +256,7 @@ SYM_CODE_END(virtual_mapped) ...@@ -256,7 +256,7 @@ SYM_CODE_END(virtual_mapped)
/* Do the copies */ /* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages) SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
movq %rdi, %rcx /* Put the page_list in %rcx */ movq %rdi, %rcx /* Put the page_list in %rcx */
xorl %edi, %edi xorl %edi, %edi
xorl %esi, %esi xorl %esi, %esi
......
...@@ -1824,7 +1824,7 @@ static inline void mwait_play_dead(void) ...@@ -1824,7 +1824,7 @@ static inline void mwait_play_dead(void)
} }
} }
void hlt_play_dead(void) void __noreturn hlt_play_dead(void)
{ {
if (__this_cpu_read(cpu_info.x86) >= 4) if (__this_cpu_read(cpu_info.x86) >= 4)
wbinvd(); wbinvd();
......
...@@ -133,7 +133,7 @@ static struct orc_entry null_orc_entry = { ...@@ -133,7 +133,7 @@ static struct orc_entry null_orc_entry = {
.sp_offset = sizeof(long), .sp_offset = sizeof(long),
.sp_reg = ORC_REG_SP, .sp_reg = ORC_REG_SP,
.bp_reg = ORC_REG_UNDEFINED, .bp_reg = ORC_REG_UNDEFINED,
.type = UNWIND_HINT_TYPE_CALL .type = ORC_TYPE_CALL
}; };
#ifdef CONFIG_CALL_THUNKS #ifdef CONFIG_CALL_THUNKS
...@@ -153,12 +153,11 @@ static struct orc_entry *orc_callthunk_find(unsigned long ip) ...@@ -153,12 +153,11 @@ static struct orc_entry *orc_callthunk_find(unsigned long ip)
/* Fake frame pointer entry -- used as a fallback for generated code */ /* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = { static struct orc_entry orc_fp_entry = {
.type = UNWIND_HINT_TYPE_CALL, .type = ORC_TYPE_CALL,
.sp_reg = ORC_REG_BP, .sp_reg = ORC_REG_BP,
.sp_offset = 16, .sp_offset = 16,
.bp_reg = ORC_REG_PREV_SP, .bp_reg = ORC_REG_PREV_SP,
.bp_offset = -16, .bp_offset = -16,
.end = 0,
}; };
static struct orc_entry *orc_find(unsigned long ip) static struct orc_entry *orc_find(unsigned long ip)
...@@ -250,13 +249,13 @@ static int orc_sort_cmp(const void *_a, const void *_b) ...@@ -250,13 +249,13 @@ static int orc_sort_cmp(const void *_a, const void *_b)
return -1; return -1;
/* /*
* The "weak" section terminator entries need to always be on the left * The "weak" section terminator entries need to always be first
* to ensure the lookup code skips them in favor of real entries. * to ensure the lookup code skips them in favor of real entries.
* These terminator entries exist to handle any gaps created by * These terminator entries exist to handle any gaps created by
* whitelisted .o files which didn't get objtool generation. * whitelisted .o files which didn't get objtool generation.
*/ */
orc_a = cur_orc_table + (a - cur_orc_ip_table); orc_a = cur_orc_table + (a - cur_orc_ip_table);
return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1; return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
} }
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size, void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
...@@ -474,13 +473,11 @@ bool unwind_next_frame(struct unwind_state *state) ...@@ -474,13 +473,11 @@ bool unwind_next_frame(struct unwind_state *state)
*/ */
orc = &orc_fp_entry; orc = &orc_fp_entry;
state->error = true; state->error = true;
} } else {
if (orc->type == ORC_TYPE_UNDEFINED)
/* End-of-stack check for kernel threads: */
if (orc->sp_reg == ORC_REG_UNDEFINED) {
if (!orc->end)
goto err; goto err;
if (orc->type == ORC_TYPE_END_OF_STACK)
goto the_end; goto the_end;
} }
...@@ -554,7 +551,7 @@ bool unwind_next_frame(struct unwind_state *state) ...@@ -554,7 +551,7 @@ bool unwind_next_frame(struct unwind_state *state)
/* Find IP, SP and possibly regs: */ /* Find IP, SP and possibly regs: */
switch (orc->type) { switch (orc->type) {
case UNWIND_HINT_TYPE_CALL: case ORC_TYPE_CALL:
ip_p = sp - sizeof(long); ip_p = sp - sizeof(long);
if (!deref_stack_reg(state, ip_p, &state->ip)) if (!deref_stack_reg(state, ip_p, &state->ip))
...@@ -567,7 +564,7 @@ bool unwind_next_frame(struct unwind_state *state) ...@@ -567,7 +564,7 @@ bool unwind_next_frame(struct unwind_state *state)
state->prev_regs = NULL; state->prev_regs = NULL;
break; break;
case UNWIND_HINT_TYPE_REGS: case ORC_TYPE_REGS:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
orc_warn_current("can't access registers at %pB\n", orc_warn_current("can't access registers at %pB\n",
(void *)orig_ip); (void *)orig_ip);
...@@ -590,13 +587,13 @@ bool unwind_next_frame(struct unwind_state *state) ...@@ -590,13 +587,13 @@ bool unwind_next_frame(struct unwind_state *state)
state->full_regs = true; state->full_regs = true;
break; break;
case UNWIND_HINT_TYPE_REGS_PARTIAL: case ORC_TYPE_REGS_PARTIAL:
if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
orc_warn_current("can't access iret registers at %pB\n", orc_warn_current("can't access iret registers at %pB\n",
(void *)orig_ip); (void *)orig_ip);
goto err; goto err;
} }
/* See UNWIND_HINT_TYPE_REGS case comment. */ /* See ORC_TYPE_REGS case comment. */
state->ip = unwind_recover_rethook(state, state->ip, state->ip = unwind_recover_rethook(state, state->ip,
(unsigned long *)(state->sp - sizeof(long))); (unsigned long *)(state->sp - sizeof(long)));
......
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
.align RETPOLINE_THUNK_SIZE .align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
ALTERNATIVE_2 __stringify(RETPOLINE \reg), \ ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
...@@ -75,7 +75,7 @@ SYM_CODE_END(__x86_indirect_thunk_array) ...@@ -75,7 +75,7 @@ SYM_CODE_END(__x86_indirect_thunk_array)
.align RETPOLINE_THUNK_SIZE .align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL) SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
CALL_DEPTH_ACCOUNT CALL_DEPTH_ACCOUNT
...@@ -103,7 +103,7 @@ SYM_CODE_END(__x86_indirect_call_thunk_array) ...@@ -103,7 +103,7 @@ SYM_CODE_END(__x86_indirect_call_thunk_array)
.align RETPOLINE_THUNK_SIZE .align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL) SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
POLINE \reg POLINE \reg
ANNOTATE_UNRET_SAFE ANNOTATE_UNRET_SAFE
......
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
SYM_CODE_START_LOCAL(pvh_start_xen) SYM_CODE_START_LOCAL(pvh_start_xen)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
cld cld
lgdt (_pa(gdt)) lgdt (_pa(gdt))
......
...@@ -288,7 +288,7 @@ EXPORT_SYMBOL(restore_processor_state); ...@@ -288,7 +288,7 @@ EXPORT_SYMBOL(restore_processor_state);
#endif #endif
#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU) #if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void resume_play_dead(void) static void __noreturn resume_play_dead(void)
{ {
play_dead_common(); play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS); tboot_shutdown(TB_SHUTDOWN_WFS);
......
...@@ -165,7 +165,7 @@ xen_pv_trap asm_exc_xen_hypervisor_callback ...@@ -165,7 +165,7 @@ xen_pv_trap asm_exc_xen_hypervisor_callback
SYM_CODE_START(xen_early_idt_handler_array) SYM_CODE_START(xen_early_idt_handler_array)
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ENDBR ENDBR
pop %rcx pop %rcx
pop %r11 pop %r11
...@@ -193,7 +193,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 ...@@ -193,7 +193,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
* rsp->rax } * rsp->rax }
*/ */
SYM_CODE_START(xen_iret) SYM_CODE_START(xen_iret)
UNWIND_HINT_EMPTY UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
pushq $0 pushq $0
jmp hypercall_iret jmp hypercall_iret
......
...@@ -45,7 +45,7 @@ SYM_CODE_END(hypercall_page) ...@@ -45,7 +45,7 @@ SYM_CODE_END(hypercall_page)
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
__INIT __INIT
SYM_CODE_START(startup_xen) SYM_CODE_START(startup_xen)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
cld cld
...@@ -71,7 +71,7 @@ SYM_CODE_END(startup_xen) ...@@ -71,7 +71,7 @@ SYM_CODE_END(startup_xen)
#ifdef CONFIG_XEN_PV_SMP #ifdef CONFIG_XEN_PV_SMP
.pushsection .text .pushsection .text
SYM_CODE_START(asm_cpu_bringup_and_idle) SYM_CODE_START(asm_cpu_bringup_and_idle)
UNWIND_HINT_EMPTY UNWIND_HINT_END_OF_STACK
ENDBR ENDBR
call cpu_bringup_and_idle call cpu_bringup_and_idle
......
...@@ -33,7 +33,7 @@ void show_ipi_list(struct seq_file *p, int prec); ...@@ -33,7 +33,7 @@ void show_ipi_list(struct seq_file *p, int prec);
void __cpu_die(unsigned int cpu); void __cpu_die(unsigned int cpu);
int __cpu_disable(void); int __cpu_disable(void);
void cpu_die(void); void __noreturn cpu_die(void);
void cpu_restart(void); void cpu_restart(void);
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
......
...@@ -322,7 +322,7 @@ void __cpu_die(unsigned int cpu) ...@@ -322,7 +322,7 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: unable to kill\n", cpu); pr_err("CPU%u: unable to kill\n", cpu);
} }
void arch_cpu_idle_dead(void) void __noreturn arch_cpu_idle_dead(void)
{ {
cpu_die(); cpu_die();
} }
...@@ -341,6 +341,8 @@ void __ref cpu_die(void) ...@@ -341,6 +341,8 @@ void __ref cpu_die(void)
__asm__ __volatile__( __asm__ __volatile__(
" movi a2, cpu_restart\n" " movi a2, cpu_restart\n"
" jx a2\n"); " jx a2\n");
BUG();
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
......
...@@ -6935,7 +6935,7 @@ EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); ...@@ -6935,7 +6935,7 @@ EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
* @ioc: Pointer to MPT_ADAPTER structure * @ioc: Pointer to MPT_ADAPTER structure
* *
**/ **/
void void __noreturn
mpt_halt_firmware(MPT_ADAPTER *ioc) mpt_halt_firmware(MPT_ADAPTER *ioc)
{ {
u32 ioc_raw_state; u32 ioc_raw_state;
......
...@@ -944,7 +944,7 @@ extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, ...@@ -944,7 +944,7 @@ extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
u8 phys_disk_num); u8 phys_disk_num);
extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
extern void mpt_halt_firmware(MPT_ADAPTER *ioc); extern void __noreturn mpt_halt_firmware(MPT_ADAPTER *ioc);
/* /*
......
...@@ -43,12 +43,14 @@ static void noinstr check_stackleak_irqoff(void) ...@@ -43,12 +43,14 @@ static void noinstr check_stackleak_irqoff(void)
* STACK_END_MAGIC, and in either casee something is seriously wrong. * STACK_END_MAGIC, and in either casee something is seriously wrong.
*/ */
if (current_sp < task_stack_low || current_sp >= task_stack_high) { if (current_sp < task_stack_low || current_sp >= task_stack_high) {
instrumentation_begin();
pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n", pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
current_sp, task_stack_low, task_stack_high - 1); current_sp, task_stack_low, task_stack_high - 1);
test_failed = true; test_failed = true;
goto out; goto out;
} }
if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) { if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
instrumentation_begin();
pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n", pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
lowest_sp, task_stack_low, task_stack_high - 1); lowest_sp, task_stack_low, task_stack_high - 1);
test_failed = true; test_failed = true;
...@@ -86,11 +88,14 @@ static void noinstr check_stackleak_irqoff(void) ...@@ -86,11 +88,14 @@ static void noinstr check_stackleak_irqoff(void)
if (*(unsigned long *)poison_low == STACKLEAK_POISON) if (*(unsigned long *)poison_low == STACKLEAK_POISON)
continue; continue;
instrumentation_begin();
pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n", pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
poison_high - poison_low, *(unsigned long *)poison_low); poison_high - poison_low, *(unsigned long *)poison_low);
test_failed = true; test_failed = true;
goto out;
} }
instrumentation_begin();
pr_info("stackleak stack usage:\n" pr_info("stackleak stack usage:\n"
" high offset: %lu bytes\n" " high offset: %lu bytes\n"
" current: %lu bytes\n" " current: %lu bytes\n"
...@@ -113,6 +118,7 @@ static void noinstr check_stackleak_irqoff(void) ...@@ -113,6 +118,7 @@ static void noinstr check_stackleak_irqoff(void)
} else { } else {
pr_info("OK: the rest of the thread stack is properly erased\n"); pr_info("OK: the rest of the thread stack is properly erased\n");
} }
instrumentation_end();
} }
static void lkdtm_STACKLEAK_ERASING(void) static void lkdtm_STACKLEAK_ERASING(void)
......
...@@ -98,7 +98,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { } ...@@ -98,7 +98,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; } static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; } static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; } static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { } static __always_inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0) #define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */ #endif /* !CONFIG_CONTEXT_TRACKING_USER */
......
...@@ -182,7 +182,7 @@ void arch_cpu_idle(void); ...@@ -182,7 +182,7 @@ void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void); void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void); void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void); void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void); void __noreturn arch_cpu_idle_dead(void);
int cpu_report_state(int cpu); int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu); int cpu_check_up_prepare(int cpu);
......
...@@ -2,47 +2,7 @@ ...@@ -2,47 +2,7 @@
#ifndef _LINUX_OBJTOOL_H #ifndef _LINUX_OBJTOOL_H
#define _LINUX_OBJTOOL_H #define _LINUX_OBJTOOL_H
#ifndef __ASSEMBLY__ #include <linux/objtool_types.h>
#include <linux/types.h>
/*
* This struct is used by asm and inline asm code to manually annotate the
* location of registers on the stack.
*/
struct unwind_hint {
u32 ip;
s16 sp_offset;
u8 sp_reg;
u8 type;
u8 signal;
u8 end;
};
#endif
/*
* UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
* (the caller's SP right before it made the call). Used for all callable
* functions, i.e. all C code and all callable asm functions.
*
* UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
* points to a fully populated pt_regs from a syscall, interrupt, or exception.
*
* UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
* sp_reg+sp_offset points to the iret return frame.
*
* UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
* Useful for code which doesn't have an ELF function annotation.
*
* UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
*/
#define UNWIND_HINT_TYPE_CALL 0
#define UNWIND_HINT_TYPE_REGS 1
#define UNWIND_HINT_TYPE_REGS_PARTIAL 2
#define UNWIND_HINT_TYPE_FUNC 3
#define UNWIND_HINT_TYPE_ENTRY 4
#define UNWIND_HINT_TYPE_SAVE 5
#define UNWIND_HINT_TYPE_RESTORE 6
#ifdef CONFIG_OBJTOOL #ifdef CONFIG_OBJTOOL
...@@ -50,7 +10,7 @@ struct unwind_hint { ...@@ -50,7 +10,7 @@ struct unwind_hint {
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \ #define UNWIND_HINT(type, sp_reg, sp_offset, signal) \
"987: \n\t" \ "987: \n\t" \
".pushsection .discard.unwind_hints\n\t" \ ".pushsection .discard.unwind_hints\n\t" \
/* struct unwind_hint */ \ /* struct unwind_hint */ \
...@@ -59,7 +19,6 @@ struct unwind_hint { ...@@ -59,7 +19,6 @@ struct unwind_hint {
".byte " __stringify(sp_reg) "\n\t" \ ".byte " __stringify(sp_reg) "\n\t" \
".byte " __stringify(type) "\n\t" \ ".byte " __stringify(type) "\n\t" \
".byte " __stringify(signal) "\n\t" \ ".byte " __stringify(signal) "\n\t" \
".byte " __stringify(end) "\n\t" \
".balign 4 \n\t" \ ".balign 4 \n\t" \
".popsection\n\t" ".popsection\n\t"
...@@ -89,7 +48,7 @@ struct unwind_hint { ...@@ -89,7 +48,7 @@ struct unwind_hint {
#define ANNOTATE_NOENDBR \ #define ANNOTATE_NOENDBR \
"986: \n\t" \ "986: \n\t" \
".pushsection .discard.noendbr\n\t" \ ".pushsection .discard.noendbr\n\t" \
_ASM_PTR " 986b\n\t" \ ".long 986b - .\n\t" \
".popsection\n\t" ".popsection\n\t"
#define ASM_REACHABLE \ #define ASM_REACHABLE \
...@@ -107,7 +66,7 @@ struct unwind_hint { ...@@ -107,7 +66,7 @@ struct unwind_hint {
#define ANNOTATE_INTRA_FUNCTION_CALL \ #define ANNOTATE_INTRA_FUNCTION_CALL \
999: \ 999: \
.pushsection .discard.intra_function_calls; \ .pushsection .discard.intra_function_calls; \
.long 999b; \ .long 999b - .; \
.popsection; .popsection;
/* /*
...@@ -131,23 +90,22 @@ struct unwind_hint { ...@@ -131,23 +90,22 @@ struct unwind_hint {
* the debuginfo as necessary. It will also warn if it sees any * the debuginfo as necessary. It will also warn if it sees any
* inconsistencies. * inconsistencies.
*/ */
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0 .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
.Lunwind_hint_ip_\@: .Lhere_\@:
.pushsection .discard.unwind_hints .pushsection .discard.unwind_hints
/* struct unwind_hint */ /* struct unwind_hint */
.long .Lunwind_hint_ip_\@ - . .long .Lhere_\@ - .
.short \sp_offset .short \sp_offset
.byte \sp_reg .byte \sp_reg
.byte \type .byte \type
.byte \signal .byte \signal
.byte \end
.balign 4 .balign 4
.popsection .popsection
.endm .endm
.macro STACK_FRAME_NON_STANDARD func:req .macro STACK_FRAME_NON_STANDARD func:req
.pushsection .discard.func_stack_frame_non_standard, "aw" .pushsection .discard.func_stack_frame_non_standard, "aw"
_ASM_PTR \func .long \func - .
.popsection .popsection
.endm .endm
...@@ -160,8 +118,24 @@ struct unwind_hint { ...@@ -160,8 +118,24 @@ struct unwind_hint {
.macro ANNOTATE_NOENDBR .macro ANNOTATE_NOENDBR
.Lhere_\@: .Lhere_\@:
.pushsection .discard.noendbr .pushsection .discard.noendbr
.quad .Lhere_\@ .long .Lhere_\@ - .
.popsection
.endm
/*
* Use objtool to validate the entry requirement that all code paths do
* VALIDATE_UNRET_END before RET.
*
* NOTE: The macro must be used at the beginning of a global symbol, otherwise
* it will be ignored.
*/
.macro VALIDATE_UNRET_BEGIN
#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
.Lhere_\@:
.pushsection .discard.validate_unret
.long .Lhere_\@ - .
.popsection .popsection
#endif
.endm .endm
.macro REACHABLE .macro REACHABLE
...@@ -177,15 +151,14 @@ struct unwind_hint { ...@@ -177,15 +151,14 @@ struct unwind_hint {
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \ #define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t"
"\n\t"
#define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD(func)
#define STACK_FRAME_NON_STANDARD_FP(func) #define STACK_FRAME_NON_STANDARD_FP(func)
#define ANNOTATE_NOENDBR #define ANNOTATE_NOENDBR
#define ASM_REACHABLE #define ASM_REACHABLE
#else #else
#define ANNOTATE_INTRA_FUNCTION_CALL #define ANNOTATE_INTRA_FUNCTION_CALL
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0 .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
.endm .endm
.macro STACK_FRAME_NON_STANDARD func:req .macro STACK_FRAME_NON_STANDARD func:req
.endm .endm
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_OBJTOOL_TYPES_H
#define _LINUX_OBJTOOL_TYPES_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * This struct is used by asm and inline asm code to manually annotate the
 * location of registers on the stack.
 */
struct unwind_hint {
	u32		ip;		/* hint location, stored IP-relative */
	s16		sp_offset;	/* offset from sp_reg to PREV_SP (see TYPE_CALL below) */
	u8		sp_reg;		/* base register the offset applies to */
	u8		type;		/* one of the UNWIND_HINT_TYPE_* values below */
	u8		signal;		/* NOTE(review): presumably set for signal/exception frames -- confirm */
};

#endif /* __ASSEMBLY__ */

/*
 * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in
 * a truncated and unreliable stack unwind.
 *
 * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before
 * hitting user entry, boot code, or fork entry (when there are no pt_regs
 * available).
 *
 * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
 * (the caller's SP right before it made the call). Used for all callable
 * functions, i.e. all C code and all callable asm functions.
 *
 * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
 * points to a fully populated pt_regs from a syscall, interrupt, or exception.
 *
 * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
 * sp_reg+sp_offset points to the iret return frame.
 *
 * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function.
 * Useful for code which doesn't have an ELF function annotation.
 *
 * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain
 * location so that it can be restored later.
 */
/* The first five types map 1:1 onto ORC_TYPE_* entries of the same value. */
#define UNWIND_HINT_TYPE_UNDEFINED	0
#define UNWIND_HINT_TYPE_END_OF_STACK	1
#define UNWIND_HINT_TYPE_CALL		2
#define UNWIND_HINT_TYPE_REGS		3
#define UNWIND_HINT_TYPE_REGS_PARTIAL	4
/* The below hint types don't have corresponding ORC types */
#define UNWIND_HINT_TYPE_FUNC		5
#define UNWIND_HINT_TYPE_SAVE		6
#define UNWIND_HINT_TYPE_RESTORE	7

#endif /* _LINUX_OBJTOOL_TYPES_H */
...@@ -23,7 +23,7 @@ static __always_inline void *task_stack_page(const struct task_struct *task) ...@@ -23,7 +23,7 @@ static __always_inline void *task_stack_page(const struct task_struct *task)
#define setup_thread_stack(new,old) do { } while(0) #define setup_thread_stack(new,old) do { } while(0)
static inline unsigned long *end_of_stack(const struct task_struct *task) static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{ {
#ifdef CONFIG_STACK_GROWSUP #ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1; return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
......
...@@ -59,8 +59,8 @@ int smp_call_function_single_async(int cpu, struct __call_single_data *csd); ...@@ -59,8 +59,8 @@ int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
* Cpus stopping functions in panic. All have default weak definitions. * Cpus stopping functions in panic. All have default weak definitions.
* Architecture-dependent code may override them. * Architecture-dependent code may override them.
*/ */
void panic_smp_self_stop(void); void __noreturn panic_smp_self_stop(void);
void nmi_panic_self_stop(struct pt_regs *regs); void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void); void crash_smp_send_stop(void);
/* /*
......
...@@ -8,8 +8,8 @@ ...@@ -8,8 +8,8 @@
/* Define the prototype for start_kernel here, rather than cluttering /* Define the prototype for start_kernel here, rather than cluttering
up something else. */ up something else. */
extern asmlinkage void __init start_kernel(void); extern asmlinkage void __init __noreturn start_kernel(void);
extern void __init arch_call_rest_init(void); extern void __init __noreturn arch_call_rest_init(void);
extern void __ref rest_init(void); extern void __ref __noreturn rest_init(void);
#endif /* _LINUX_START_KERNEL_H */ #endif /* _LINUX_START_KERNEL_H */
...@@ -686,7 +686,7 @@ static void __init setup_command_line(char *command_line) ...@@ -686,7 +686,7 @@ static void __init setup_command_line(char *command_line)
static __initdata DECLARE_COMPLETION(kthreadd_done); static __initdata DECLARE_COMPLETION(kthreadd_done);
noinline void __ref rest_init(void) noinline void __ref __noreturn rest_init(void)
{ {
struct task_struct *tsk; struct task_struct *tsk;
int pid; int pid;
...@@ -829,7 +829,7 @@ static int __init early_randomize_kstack_offset(char *buf) ...@@ -829,7 +829,7 @@ static int __init early_randomize_kstack_offset(char *buf)
early_param("randomize_kstack_offset", early_randomize_kstack_offset); early_param("randomize_kstack_offset", early_randomize_kstack_offset);
#endif #endif
void __init __weak arch_call_rest_init(void) void __init __weak __noreturn arch_call_rest_init(void)
{ {
rest_init(); rest_init();
} }
...@@ -877,7 +877,7 @@ static void __init print_unknown_bootoptions(void) ...@@ -877,7 +877,7 @@ static void __init print_unknown_bootoptions(void)
memblock_free(unknown_options, len); memblock_free(unknown_options, len);
} }
asmlinkage __visible void __init __no_sanitize_address start_kernel(void) asmlinkage __visible void __init __no_sanitize_address __noreturn start_kernel(void)
{ {
char *command_line; char *command_line;
char *after_dashes; char *after_dashes;
......
...@@ -141,7 +141,7 @@ EXPORT_SYMBOL(panic_blink); ...@@ -141,7 +141,7 @@ EXPORT_SYMBOL(panic_blink);
/* /*
* Stop ourself in panic -- architecture code may override this * Stop ourself in panic -- architecture code may override this
*/ */
void __weak panic_smp_self_stop(void) void __weak __noreturn panic_smp_self_stop(void)
{ {
while (1) while (1)
cpu_relax(); cpu_relax();
...@@ -151,7 +151,7 @@ void __weak panic_smp_self_stop(void) ...@@ -151,7 +151,7 @@ void __weak panic_smp_self_stop(void)
* Stop ourselves in NMI context if another CPU has already panicked. Arch code * Stop ourselves in NMI context if another CPU has already panicked. Arch code
* may override this to prepare for crash dumping, e.g. save regs info. * may override this to prepare for crash dumping, e.g. save regs info.
*/ */
void __weak nmi_panic_self_stop(struct pt_regs *regs) void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{ {
panic_smp_self_stop(); panic_smp_self_stop();
} }
......
...@@ -75,7 +75,7 @@ static noinline int __cpuidle cpu_idle_poll(void) ...@@ -75,7 +75,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
void __weak arch_cpu_idle_prepare(void) { } void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { } void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { } void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { } void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
void __weak arch_cpu_idle(void) void __weak arch_cpu_idle(void)
{ {
cpu_idle_force_poll = 1; cpu_idle_force_poll = 1;
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# #
# Disassemble a single function. # Disassemble a single function.
# #
# usage: objdump-func <file> <func> # usage: objdump-func <file> <func> [<func> ...]
set -o errexit set -o errexit
set -o nounset set -o nounset
...@@ -13,17 +13,33 @@ OBJDUMP="${CROSS_COMPILE:-}objdump" ...@@ -13,17 +13,33 @@ OBJDUMP="${CROSS_COMPILE:-}objdump"
command -v gawk >/dev/null 2>&1 || die "gawk isn't installed" command -v gawk >/dev/null 2>&1 || die "gawk isn't installed"
usage() { usage() {
echo "usage: objdump-func <file> <func>" >&2 echo "usage: objdump-func <file> <func> [<func> ...]" >&2
exit 1 exit 1
} }
[[ $# -lt 2 ]] && usage [[ $# -lt 2 ]] && usage
OBJ=$1; shift OBJ=$1; shift
FUNC=$1; shift FUNCS=("$@")
# Secret feature to allow adding extra objdump args at the end ${OBJDUMP} -wdr $OBJ | gawk -M -v _funcs="${FUNCS[*]}" '
EXTRA_ARGS=$@ BEGIN { split(_funcs, funcs); }
/^$/ { func_match=0; }
# Note this also matches compiler-added suffixes like ".cold", etc /<.*>:/ {
${OBJDUMP} -wdr $EXTRA_ARGS $OBJ | gawk -M -v f=$FUNC '/^$/ { P=0; } $0 ~ "<" f "(\\..*)?>:" { P=1; O=strtonum("0x" $1); } { if (P) { o=strtonum("0x" $1); printf("%04x ", o-O); print $0; } }' f = gensub(/.*<(.*)>:/, "\\1", 1);
for (i in funcs) {
# match compiler-added suffixes like ".cold", etc
if (f ~ "^" funcs[i] "(\\..*)?") {
func_match = 1;
base = strtonum("0x" $1);
break;
}
}
}
{
if (func_match) {
addr = strtonum("0x" $1);
printf("%04x ", addr - base);
print;
}
}'
...@@ -128,7 +128,7 @@ static int orc_sort_cmp(const void *_a, const void *_b) ...@@ -128,7 +128,7 @@ static int orc_sort_cmp(const void *_a, const void *_b)
* whitelisted .o files which didn't get objtool generation. * whitelisted .o files which didn't get objtool generation.
*/ */
orc_a = g_orc_table + (a - g_orc_ip_table); orc_a = g_orc_table + (a - g_orc_ip_table);
return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1; return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
} }
static void *sort_orctable(void *arg) static void *sort_orctable(void *arg)
......
...@@ -39,6 +39,12 @@ ...@@ -39,6 +39,12 @@
#define ORC_REG_SP_INDIRECT 9 #define ORC_REG_SP_INDIRECT 9
#define ORC_REG_MAX 15 #define ORC_REG_MAX 15
#define ORC_TYPE_UNDEFINED 0
#define ORC_TYPE_END_OF_STACK 1
#define ORC_TYPE_CALL 2
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/byteorder.h> #include <asm/byteorder.h>
...@@ -56,16 +62,14 @@ struct orc_entry { ...@@ -56,16 +62,14 @@ struct orc_entry {
#if defined(__LITTLE_ENDIAN_BITFIELD) #if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned sp_reg:4; unsigned sp_reg:4;
unsigned bp_reg:4; unsigned bp_reg:4;
unsigned type:2; unsigned type:3;
unsigned signal:1; unsigned signal:1;
unsigned end:1;
#elif defined(__BIG_ENDIAN_BITFIELD) #elif defined(__BIG_ENDIAN_BITFIELD)
unsigned bp_reg:4; unsigned bp_reg:4;
unsigned sp_reg:4; unsigned sp_reg:4;
unsigned unused:4; unsigned unused:4;
unsigned end:1;
unsigned signal:1; unsigned signal:1;
unsigned type:2; unsigned type:3;
#endif #endif
} __packed; } __packed;
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_OBJTOOL_H
#define _LINUX_OBJTOOL_H

/*
 * Annotation macros whose records land in .discard.* sections, which are
 * consumed by objtool at build time for stack/control-flow validation and
 * then discarded from the final image.
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * This struct is used by asm and inline asm code to manually annotate the
 * location of registers on the stack.
 */
struct unwind_hint {
	u32		ip;		/* hint location, stored IP-relative (".long 987b - .") */
	s16		sp_offset;	/* offset from sp_reg to PREV_SP (see TYPE_CALL below) */
	u8		sp_reg;		/* base register the offset applies to */
	u8		type;		/* one of the UNWIND_HINT_TYPE_* values below */
	u8		signal;		/* NOTE(review): presumably set for signal/exception frames -- confirm */
	u8		end;		/* NOTE(review): presumably marks the end of the stack unwind -- confirm */
};
#endif

/*
 * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
 * (the caller's SP right before it made the call). Used for all callable
 * functions, i.e. all C code and all callable asm functions.
 *
 * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
 * points to a fully populated pt_regs from a syscall, interrupt, or exception.
 *
 * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
 * sp_reg+sp_offset points to the iret return frame.
 *
 * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
 * Useful for code which doesn't have an ELF function annotation.
 *
 * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
 */
#define UNWIND_HINT_TYPE_CALL		0
#define UNWIND_HINT_TYPE_REGS		1
#define UNWIND_HINT_TYPE_REGS_PARTIAL	2
#define UNWIND_HINT_TYPE_FUNC		3
#define UNWIND_HINT_TYPE_ENTRY		4
#define UNWIND_HINT_TYPE_SAVE		5
#define UNWIND_HINT_TYPE_RESTORE	6

#ifdef CONFIG_OBJTOOL

#include <asm/asm.h>

#ifndef __ASSEMBLY__

/*
 * Inline-asm variant: emits one struct unwind_hint record (layout matching
 * the struct above, hence the IP-relative ".long 987b - .") into the
 * .discard.unwind_hints section.
 */
#define UNWIND_HINT(sp_reg, sp_offset, type, signal, end)	\
	"987: \n\t"						\
	".pushsection .discard.unwind_hints\n\t"		\
	/* struct unwind_hint */				\
	".long 987b - .\n\t"					\
	".short " __stringify(sp_offset) "\n\t"			\
	".byte " __stringify(sp_reg) "\n\t"			\
	".byte " __stringify(type) "\n\t"			\
	".byte " __stringify(signal) "\n\t"			\
	".byte " __stringify(end) "\n\t"			\
	".balign 4 \n\t"					\
	".popsection\n\t"

/*
 * This macro marks the given function's stack frame as "non-standard", which
 * tells objtool to ignore the function when doing stack metadata validation.
 * It should only be used in special cases where you're 100% sure it won't
 * affect the reliability of frame pointers and kernel stack traces.
 *
 * For more information, see tools/objtool/Documentation/objtool.txt.
 */
#define STACK_FRAME_NON_STANDARD(func) \
	static void __used __section(".discard.func_stack_frame_non_standard") \
	*__func_stack_frame_non_standard_##func = func

/*
 * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
 * for the case where a function is intentionally missing frame pointer setup,
 * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
 */
#ifdef CONFIG_FRAME_POINTER
#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
#else
#define STACK_FRAME_NON_STANDARD_FP(func)
#endif

/*
 * Record this address in .discard.noendbr.  NOTE(review): stored as an
 * absolute pointer (_ASM_PTR), unlike the other IP-relative records here.
 */
#define ANNOTATE_NOENDBR					\
	"986: \n\t"						\
	".pushsection .discard.noendbr\n\t"			\
	_ASM_PTR " 986b\n\t"					\
	".popsection\n\t"

/* Record this address (IP-relative) in .discard.reachable for objtool. */
#define ASM_REACHABLE							\
	"998:\n\t"							\
	".pushsection .discard.reachable\n\t"				\
	".long 998b - .\n\t"						\
	".popsection\n\t"

#else /* __ASSEMBLY__ */

/*
 * This macro indicates that the following intra-function call is valid.
 * Any non-annotated intra-function call will cause objtool to issue a warning.
 */
#define ANNOTATE_INTRA_FUNCTION_CALL				\
	999:							\
	.pushsection .discard.intra_function_calls;		\
	.long 999b;						\
	.popsection;

/*
 * In asm, there are two kinds of code: normal C-type callable functions and
 * the rest.  The normal callable functions can be called by other code, and
 * don't do anything unusual with the stack.  Such normal callable functions
 * are annotated with the ENTRY/ENDPROC macros.  Most asm code falls in this
 * category.  In this case, no special debugging annotations are needed because
 * objtool can automatically generate the ORC data for the ORC unwinder to read
 * at runtime.
 *
 * Anything which doesn't fall into the above category, such as syscall and
 * interrupt handlers, tends to not be called directly by other functions, and
 * often does unusual non-C-function-type things with the stack pointer.  Such
 * code needs to be annotated such that objtool can understand it.  The
 * following CFI hint macros are for this type of code.
 *
 * These macros provide hints to objtool about the state of the stack at each
 * instruction.  Objtool starts from the hints and follows the code flow,
 * making automatic CFI adjustments when it sees pushes and pops, filling out
 * the debuginfo as necessary.  It will also warn if it sees any
 * inconsistencies.
 */
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0
.Lunwind_hint_ip_\@:
	.pushsection .discard.unwind_hints
		/* struct unwind_hint */
		.long .Lunwind_hint_ip_\@ - .
		.short \sp_offset
		.byte \sp_reg
		.byte \type
		.byte \signal
		.byte \end
		.balign 4
	.popsection
.endm

.macro STACK_FRAME_NON_STANDARD func:req
	.pushsection .discard.func_stack_frame_non_standard, "aw"
	_ASM_PTR \func
	.popsection
.endm

.macro STACK_FRAME_NON_STANDARD_FP func:req
#ifdef CONFIG_FRAME_POINTER
	STACK_FRAME_NON_STANDARD \func
#endif
.endm

.macro ANNOTATE_NOENDBR
.Lhere_\@:
	.pushsection .discard.noendbr
	.quad .Lhere_\@
	.popsection
.endm

.macro REACHABLE
.Lhere_\@:
	.pushsection .discard.reachable
	.long .Lhere_\@ - .
	.popsection
.endm

#endif /* __ASSEMBLY__ */

#else /* !CONFIG_OBJTOOL */

/* Stubs: with objtool disabled, every annotation compiles away to nothing. */

#ifndef __ASSEMBLY__

#define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \
	"\n\t"
#define STACK_FRAME_NON_STANDARD(func)
#define STACK_FRAME_NON_STANDARD_FP(func)
#define ANNOTATE_NOENDBR
#define ASM_REACHABLE

#else

#define ANNOTATE_INTRA_FUNCTION_CALL
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0
.endm
.macro STACK_FRAME_NON_STANDARD func:req
.endm
.macro ANNOTATE_NOENDBR
.endm
.macro REACHABLE
.endm

#endif

#endif /* CONFIG_OBJTOOL */

#endif /* _LINUX_OBJTOOL_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_OBJTOOL_TYPES_H
#define _LINUX_OBJTOOL_TYPES_H

/*
 * NOTE(review): this appears to be the tools/include copy kept in sync with
 * include/linux/objtool_types.h (a sync check lists that path) -- any change
 * here must be mirrored there.
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * This struct is used by asm and inline asm code to manually annotate the
 * location of registers on the stack.
 */
struct unwind_hint {
	u32		ip;		/* hint location, stored IP-relative */
	s16		sp_offset;	/* offset from sp_reg to PREV_SP (see TYPE_CALL below) */
	u8		sp_reg;		/* base register the offset applies to */
	u8		type;		/* one of the UNWIND_HINT_TYPE_* values below */
	u8		signal;		/* NOTE(review): presumably set for signal/exception frames -- confirm */
};

#endif /* __ASSEMBLY__ */

/*
 * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in
 * a truncated and unreliable stack unwind.
 *
 * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before
 * hitting user entry, boot code, or fork entry (when there are no pt_regs
 * available).
 *
 * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
 * (the caller's SP right before it made the call). Used for all callable
 * functions, i.e. all C code and all callable asm functions.
 *
 * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
 * points to a fully populated pt_regs from a syscall, interrupt, or exception.
 *
 * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
 * sp_reg+sp_offset points to the iret return frame.
 *
 * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function.
 * Useful for code which doesn't have an ELF function annotation.
 *
 * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain
 * location so that it can be restored later.
 */
/* The first five types map 1:1 onto ORC_TYPE_* entries of the same value. */
#define UNWIND_HINT_TYPE_UNDEFINED	0
#define UNWIND_HINT_TYPE_END_OF_STACK	1
#define UNWIND_HINT_TYPE_CALL		2
#define UNWIND_HINT_TYPE_REGS		3
#define UNWIND_HINT_TYPE_REGS_PARTIAL	4
/* The below hint types don't have corresponding ORC types */
#define UNWIND_HINT_TYPE_FUNC		5
#define UNWIND_HINT_TYPE_SAVE		6
#define UNWIND_HINT_TYPE_RESTORE	7

#endif /* _LINUX_OBJTOOL_TYPES_H */
This diff is collapsed.
...@@ -474,7 +474,7 @@ static int read_symbols(struct elf *elf) ...@@ -474,7 +474,7 @@ static int read_symbols(struct elf *elf)
/* Create parent/child links for any cold subfunctions */ /* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) { list_for_each_entry(sec, &elf->sections, list) {
list_for_each_entry(sym, &sec->symbol_list, list) { sec_for_each_sym(sec, sym) {
char pname[MAX_NAME_LEN + 1]; char pname[MAX_NAME_LEN + 1];
size_t pnamelen; size_t pnamelen;
if (sym->type != STT_FUNC) if (sym->type != STT_FUNC)
......
...@@ -61,7 +61,7 @@ struct instruction { ...@@ -61,7 +61,7 @@ struct instruction {
restore : 1, restore : 1,
retpoline_safe : 1, retpoline_safe : 1,
noendbr : 1, noendbr : 1,
entry : 1, unret : 1,
visited : 4, visited : 4,
no_reloc : 1; no_reloc : 1;
/* 10 bit hole */ /* 10 bit hole */
...@@ -92,7 +92,7 @@ static inline struct symbol *insn_func(struct instruction *insn) ...@@ -92,7 +92,7 @@ static inline struct symbol *insn_func(struct instruction *insn)
#define VISITED_BRANCH 0x01 #define VISITED_BRANCH 0x01
#define VISITED_BRANCH_UACCESS 0x02 #define VISITED_BRANCH_UACCESS 0x02
#define VISITED_BRANCH_MASK 0x03 #define VISITED_BRANCH_MASK 0x03
#define VISITED_ENTRY 0x04 #define VISITED_UNRET 0x04
static inline bool is_static_jump(struct instruction *insn) static inline bool is_static_jump(struct instruction *insn)
{ {
......
...@@ -188,4 +188,13 @@ struct symbol *find_func_containing(struct section *sec, unsigned long offset); ...@@ -188,4 +188,13 @@ struct symbol *find_func_containing(struct section *sec, unsigned long offset);
#define for_each_sec(file, sec) \ #define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list) list_for_each_entry(sec, &file->elf->sections, list)
#define sec_for_each_sym(sec, sym) \
list_for_each_entry(sym, &sec->symbol_list, list)
#define for_each_sym(file, sym) \
for (struct section *__sec, *__fake = (struct section *)1; \
__fake; __fake = NULL) \
for_each_sec(file, __sec) \
sec_for_each_sym(__sec, sym)
#endif /* _OBJTOOL_ELF_H */ #endif /* _OBJTOOL_ELF_H */
...@@ -53,6 +53,11 @@ static inline char *offstr(struct section *sec, unsigned long offset) ...@@ -53,6 +53,11 @@ static inline char *offstr(struct section *sec, unsigned long offset)
free(_str); \ free(_str); \
}) })
#define WARN_INSN(insn, format, ...) \
({ \
WARN_FUNC(format, insn->sec, insn->offset, ##__VA_ARGS__); \
})
#define BT_FUNC(format, insn, ...) \ #define BT_FUNC(format, insn, ...) \
({ \ ({ \
struct instruction *_insn = (insn); \ struct instruction *_insn = (insn); \
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
*/ */
#include <unistd.h> #include <unistd.h>
#include <linux/objtool.h>
#include <asm/orc_types.h> #include <asm/orc_types.h>
#include <objtool/objtool.h> #include <objtool/objtool.h>
#include <objtool/warn.h> #include <objtool/warn.h>
...@@ -39,11 +38,15 @@ static const char *reg_name(unsigned int reg) ...@@ -39,11 +38,15 @@ static const char *reg_name(unsigned int reg)
static const char *orc_type_name(unsigned int type) static const char *orc_type_name(unsigned int type)
{ {
switch (type) { switch (type) {
case UNWIND_HINT_TYPE_CALL: case ORC_TYPE_UNDEFINED:
return "(und)";
case ORC_TYPE_END_OF_STACK:
return "end";
case ORC_TYPE_CALL:
return "call"; return "call";
case UNWIND_HINT_TYPE_REGS: case ORC_TYPE_REGS:
return "regs"; return "regs";
case UNWIND_HINT_TYPE_REGS_PARTIAL: case ORC_TYPE_REGS_PARTIAL:
return "regs (partial)"; return "regs (partial)";
default: default:
return "?"; return "?";
...@@ -202,6 +205,7 @@ int orc_dump(const char *_objname) ...@@ -202,6 +205,7 @@ int orc_dump(const char *_objname)
printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i])); printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
} }
printf("type:%s", orc_type_name(orc[i].type));
printf(" sp:"); printf(" sp:");
...@@ -211,8 +215,7 @@ int orc_dump(const char *_objname) ...@@ -211,8 +215,7 @@ int orc_dump(const char *_objname)
print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset));
printf(" type:%s signal:%d end:%d\n", printf(" signal:%d\n", orc[i].signal);
orc_type_name(orc[i].type), orc[i].signal, orc[i].end);
} }
elf_end(elf); elf_end(elf);
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#include <linux/objtool.h> #include <linux/objtool_types.h>
#include <asm/orc_types.h> #include <asm/orc_types.h>
#include <objtool/check.h> #include <objtool/check.h>
...@@ -21,19 +21,38 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, ...@@ -21,19 +21,38 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
memset(orc, 0, sizeof(*orc)); memset(orc, 0, sizeof(*orc));
if (!cfi) { if (!cfi) {
orc->end = 0; /*
orc->sp_reg = ORC_REG_UNDEFINED; * This is usually either unreachable nops/traps (which don't
* trigger unreachable instruction warnings), or
* STACK_FRAME_NON_STANDARD functions.
*/
orc->type = ORC_TYPE_UNDEFINED;
return 0; return 0;
} }
orc->end = cfi->end; switch (cfi->type) {
orc->signal = cfi->signal; case UNWIND_HINT_TYPE_UNDEFINED:
orc->type = ORC_TYPE_UNDEFINED;
if (cfi->cfa.base == CFI_UNDEFINED) { return 0;
orc->sp_reg = ORC_REG_UNDEFINED; case UNWIND_HINT_TYPE_END_OF_STACK:
orc->type = ORC_TYPE_END_OF_STACK;
return 0; return 0;
case UNWIND_HINT_TYPE_CALL:
orc->type = ORC_TYPE_CALL;
break;
case UNWIND_HINT_TYPE_REGS:
orc->type = ORC_TYPE_REGS;
break;
case UNWIND_HINT_TYPE_REGS_PARTIAL:
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
} }
orc->signal = cfi->signal;
switch (cfi->cfa.base) { switch (cfi->cfa.base) {
case CFI_SP: case CFI_SP:
orc->sp_reg = ORC_REG_SP; orc->sp_reg = ORC_REG_SP;
...@@ -60,8 +79,7 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, ...@@ -60,8 +79,7 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
orc->sp_reg = ORC_REG_DX; orc->sp_reg = ORC_REG_DX;
break; break;
default: default:
WARN_FUNC("unknown CFA base reg %d", WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
insn->sec, insn->offset, cfi->cfa.base);
return -1; return -1;
} }
...@@ -76,14 +94,12 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, ...@@ -76,14 +94,12 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
orc->bp_reg = ORC_REG_BP; orc->bp_reg = ORC_REG_BP;
break; break;
default: default:
WARN_FUNC("unknown BP base reg %d", WARN_INSN(insn, "unknown BP base reg %d", bp->base);
insn->sec, insn->offset, bp->base);
return -1; return -1;
} }
orc->sp_offset = cfi->cfa.offset; orc->sp_offset = cfi->cfa.offset;
orc->bp_offset = bp->offset; orc->bp_offset = bp->offset;
orc->type = cfi->type;
return 0; return 0;
} }
...@@ -148,11 +164,7 @@ int orc_create(struct objtool_file *file) ...@@ -148,11 +164,7 @@ int orc_create(struct objtool_file *file)
struct orc_list_entry *entry; struct orc_list_entry *entry;
struct list_head orc_list; struct list_head orc_list;
struct orc_entry null = { struct orc_entry null = { .type = ORC_TYPE_UNDEFINED };
.sp_reg = ORC_REG_UNDEFINED,
.bp_reg = ORC_REG_UNDEFINED,
.type = UNWIND_HINT_TYPE_CALL,
};
/* Build a deduplicated list of ORC entries: */ /* Build a deduplicated list of ORC entries: */
INIT_LIST_HEAD(&orc_list); INIT_LIST_HEAD(&orc_list);
......
...@@ -6,7 +6,7 @@ if [ -z "$SRCARCH" ]; then ...@@ -6,7 +6,7 @@ if [ -z "$SRCARCH" ]; then
exit 1 exit 1
fi fi
FILES="include/linux/objtool.h" FILES="include/linux/objtool_types.h"
if [ "$SRCARCH" = "x86" ]; then if [ "$SRCARCH" = "x86" ]; then
FILES="$FILES FILES="$FILES
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment