Commit 8301ae82 authored by Catalin Marinas's avatar Catalin Marinas

Merge branch 'for-next/entry-s-to-c' into for-next/core

Move the synchronous exception paths from entry.S into a C file to
improve the code readability.

* for-next/entry-s-to-c:
  arm64: entry-common: don't touch daif before bp-hardening
  arm64: Remove asmlinkage from updated functions
  arm64: entry: convert el0_sync to C
  arm64: entry: convert el1_sync to C
  arm64: add local_daif_inherit()
  arm64: Add prototypes for functions called by entry.S
  arm64: remove __exception annotations
parents 346f6a46 bfe29874
...@@ -74,13 +74,4 @@ alternative_if ARM64_ALT_PAN_NOT_UAO ...@@ -74,13 +74,4 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0) SET_PSTATE_PAN(0)
alternative_else_nop_endif alternative_else_nop_endif
.endm .endm
/*
* Remove the address tag from a virtual address, if present.
*/
.macro untagged_addr, dst, addr
sbfx \dst, \addr, #0, #56
and \dst, \dst, \addr
.endm
#endif #endif
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <asm/arch_gicv3.h> #include <asm/arch_gicv3.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/ptrace.h>
#define DAIF_PROCCTX 0 #define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT #define DAIF_PROCCTX_NOIRQ PSR_I_BIT
...@@ -109,4 +110,19 @@ static inline void local_daif_restore(unsigned long flags) ...@@ -109,4 +110,19 @@ static inline void local_daif_restore(unsigned long flags)
trace_hardirqs_off(); trace_hardirqs_off();
} }
/*
 * Called by synchronous exception handlers to restore the DAIF bits that were
 * modified by taking an exception.
 */
static inline void local_daif_inherit(struct pt_regs *regs)
{
	/* Only the D/A/I/F bits of the interrupted context's PSTATE. */
	unsigned long flags = regs->pstate & DAIF_MASK;

	/*
	 * We can't use local_daif_restore(regs->pstate) here as
	 * system_has_prio_mask_debugging() won't restore the I bit if it can
	 * use the pmr instead.
	 */
	write_sysreg(flags, daif);
}
#endif #endif
...@@ -8,14 +8,15 @@ ...@@ -8,14 +8,15 @@
#define __ASM_EXCEPTION_H #define __ASM_EXCEPTION_H
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry #define __exception_irq_entry __irq_entry
#else #else
#define __exception_irq_entry __exception #define __exception_irq_entry __kprobes
#endif #endif
static inline u32 disr_to_esr(u64 disr) static inline u32 disr_to_esr(u64 disr)
...@@ -31,5 +32,22 @@ static inline u32 disr_to_esr(u64 disr) ...@@ -31,5 +32,22 @@ static inline u32 disr_to_esr(u64 disr)
} }
asmlinkage void enter_from_user_mode(void); asmlinkage void enter_from_user_mode(void);
/*
 * Prototypes for the synchronous exception handlers implemented in C.
 * Only functions still called directly from entry.S keep asmlinkage.
 * (Duplicate do_sp_pc_abort() declaration removed.)
 */
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
			struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
void do_sve_acc(unsigned int esr, struct pt_regs *regs);
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
void do_sysinstr(unsigned int esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void el0_svc_handler(struct pt_regs *regs);
void el0_svc_compat_handler(struct pt_regs *regs);
void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
			    struct pt_regs *regs);
#endif /* __ASM_EXCEPTION_H */ #endif /* __ASM_EXCEPTION_H */
...@@ -26,10 +26,12 @@ ...@@ -26,10 +26,12 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h> #include <asm/hw_breakpoint.h>
#include <asm/kasan.h>
#include <asm/lse.h> #include <asm/lse.h>
#include <asm/pgtable-hwdef.h> #include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h> #include <asm/pointer_auth.h>
...@@ -214,6 +216,18 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc, ...@@ -214,6 +216,18 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->sp = sp; regs->sp = sp;
} }
/* True if @addr is in the TTBR0 (user) address range. */
static inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

/* True if @addr is in the TTBR1 (kernel) address range, ignoring any tag. */
static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp) unsigned long sp)
......
...@@ -42,16 +42,6 @@ static inline int __in_irqentry_text(unsigned long ptr) ...@@ -42,16 +42,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__irqentry_text_end; ptr < (unsigned long)&__irqentry_text_end;
} }
static inline int in_exception_text(unsigned long ptr)
{
int in;
in = ptr >= (unsigned long)&__exception_text_start &&
ptr < (unsigned long)&__exception_text_end;
return in ? : __in_irqentry_text(ptr);
}
static inline int in_entry_text(unsigned long ptr) static inline int in_entry_text(unsigned long ptr)
{ {
return ptr >= (unsigned long)&__entry_text_start && return ptr >= (unsigned long)&__entry_text_start &&
......
...@@ -13,9 +13,9 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) ...@@ -13,9 +13,9 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
# Object file lists. # Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \ entry-common.o entry-fpsimd.o process.o ptrace.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \ setup.o signal.o sys.o stacktrace.o time.o traps.o \
hyp-stub.o psci.o cpu_ops.o insn.o \ io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \ return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \ cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \ smp.o smp_spin_table.o topology.o smccc-call.o \
......
// SPDX-License-Identifier: GPL-2.0
/*
* Exception handling code
*
* Copyright (C) 2019 ARM Ltd.
*/
#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
/*
 * EL1 data/instruction abort: restore the DAIF bits masked on exception
 * entry, strip the address tag from FAR_EL1, then run the generic
 * memory-abort handler.
 */
static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	local_daif_inherit(regs);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el1_abort);
/*
 * EL1 PC alignment exception: inherit the entry DAIF state and report
 * the faulting address from FAR_EL1.
 */
static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el1_pc);
static void el1_undef(struct pt_regs *regs)
{
local_daif_inherit(regs);
do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el1_undef);
static void el1_inv(struct pt_regs *regs, unsigned long esr)
{
local_daif_inherit(regs);
bad_mode(regs, 0, esr);
}
NOKPROBE_SYMBOL(el1_inv);
/*
 * EL1 hardware debug exception (breakpoint/watchpoint/single-step/BRK).
 * Runs with interrupts still masked; FAR_EL1 is passed through as the
 * possible watchpoint address.
 */
static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * The CPU masked interrupts, and we are leaving them masked during
	 * do_debug_exception(). Update PMR as if we had called
	 * local_mask_daif().
	 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	do_debug_exception(far, esr, regs);
}
NOKPROBE_SYMBOL(el1_dbg);
/*
 * Top-level dispatcher for synchronous exceptions taken from EL1;
 * called from the el1_sync vector in entry.S with pt_regs on the stack.
 * Dispatches on the exception class (EC) field of ESR_EL1.
 *
 * (Also drops the stray ';' that followed the switch's closing brace.)
 */
asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:	/* data abort from EL1 */
	case ESR_ELx_EC_IABT_CUR:	/* instruction abort from EL1 */
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:		/* configurable trap */
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el1_sync_handler);
/*
 * EL0 data abort: leave userspace context tracking with IRQs still off,
 * restore process-context DAIF, strip the address tag, then run the
 * generic memory-abort handler.
 */
static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_da);
/*
 * EL0 instruction abort. Branch-predictor hardening must run before
 * DAIF is restored, hence the explicit ordering below.
 */
static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_ia);
/* EL0 FP/Advanced SIMD access trap. */
static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_acc);
/* EL0 Scalable Vector Extension access trap. */
static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_sve_acc);
/* EL0 FP/Advanced SIMD exception. */
static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_exc);
/* EL0 trapped system instruction (including WFx, see el0_sync_handler). */
static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}
NOKPROBE_SYMBOL(el0_sys);
/*
 * EL0 PC alignment fault. Apply BP hardening when the faulting PC is
 * not a TTBR0 (user) address, before IRQs are re-enabled.
 */
static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_pc);
/*
 * EL0 SP alignment fault. DAIF_PROCCTX_NOIRQ keeps IRQs masked while
 * the fault is reported against the saved user SP.
 */
static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
	do_sp_pc_abort(regs->sp, esr, regs);
}
NOKPROBE_SYMBOL(el0_sp);
/* EL0 undefined instruction (also used for trapped CP14 accesses). */
static void notrace el0_undef(struct pt_regs *regs)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el0_undef);
/*
 * Fallback for unhandled EL0 exception classes; bad_el0_sync() reports
 * the failure but, unlike bad_mode(), returns.
 */
static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}
NOKPROBE_SYMBOL(el0_inv);
/*
 * EL0 hardware debug exception (breakpoint/watchpoint/single-step/BRK).
 * The exception is handled with interrupts still masked; DAIF is only
 * restored (IRQs still off) after do_debug_exception() has run.
 */
static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	user_exit_irqoff();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
NOKPROBE_SYMBOL(el0_dbg);
/*
 * EL0 SVC (64-bit syscall). When IRQ priority masking is in use, bring
 * the PMR in line with the masked-IRQ entry state before entering the
 * syscall path.
 */
static void notrace el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	el0_svc_handler(regs);
}
NOKPROBE_SYMBOL(el0_svc);
/*
 * Top-level dispatcher for synchronous exceptions taken from 64-bit EL0;
 * called from the el0_sync vector in entry.S with pt_regs on the stack.
 * Dispatches on the exception class (EC) field of ESR_EL1.
 */
asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el0_sync_handler);
#ifdef CONFIG_COMPAT
/* AArch32 EL0 trapped CP15 (MRC/MCR/MRRC/MCRR) access. */
static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}
NOKPROBE_SYMBOL(el0_cp15);
/*
 * AArch32 EL0 SVC (compat syscall). Mirrors el0_svc(): align PMR with
 * the masked-IRQ entry state before entering the compat syscall path.
 */
static void notrace el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	el0_svc_compat_handler(regs);
}
NOKPROBE_SYMBOL(el0_svc_compat);
/*
 * Top-level dispatcher for synchronous exceptions taken from 32-bit
 * (compat) EL0; called from the el0_sync_compat vector in entry.S.
 * Dispatches on the exception class (EC) field of ESR_EL1.
 */
asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:	/* CP14 traps are treated as undef */
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el0_sync_compat_handler);
#endif /* CONFIG_COMPAT */
...@@ -578,76 +578,9 @@ ENDPROC(el1_error_invalid) ...@@ -578,76 +578,9 @@ ENDPROC(el1_error_invalid)
.align 6 .align 6
el1_sync: el1_sync:
kernel_entry 1 kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_ia:
/*
* Fall through to the Data abort case
*/
el1_da:
/*
* Data abort handling
*/
mrs x3, far_el1
inherit_daif pstate=x23, tmp=x2
untagged_addr x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
kernel_exit 1
el1_pc:
/*
* PC alignment exception handling. We don't handle SP alignment faults,
* since we will have hit a recursive exception when trying to push the
* initial pt_regs.
*/
mrs x0, far_el1
inherit_daif pstate=x23, tmp=x2
mov x2, sp
bl do_sp_pc_abort
ASM_BUG()
el1_undef:
/*
* Undefined instruction
*/
inherit_daif pstate=x23, tmp=x2
mov x0, sp mov x0, sp
bl do_undefinstr bl el1_sync_handler
kernel_exit 1 kernel_exit 1
el1_dbg:
/*
* Debug exception handling
*/
cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
gic_prio_kentry_setup tmp=x3
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
inherit_daif pstate=x23, tmp=x2
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
bl bad_mode
ASM_BUG()
ENDPROC(el1_sync) ENDPROC(el1_sync)
.align 6 .align 6
...@@ -714,71 +647,18 @@ ENDPROC(el1_irq) ...@@ -714,71 +647,18 @@ ENDPROC(el1_irq)
.align 6 .align 6
el0_sync: el0_sync:
kernel_entry 0 kernel_entry 0
mrs x25, esr_el1 // read the syndrome register mov x0, sp
lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class bl el0_sync_handler
cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state b ret_to_user
b.eq el0_svc
cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
cmp x24, #ESR_ELx_EC_SVE // SVE access
b.eq el0_sve_acc
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
.align 6 .align 6
el0_sync_compat: el0_sync_compat:
kernel_entry 0, 32 kernel_entry 0, 32
mrs x25, esr_el1 // read the syndrome register
lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state
b.eq el0_svc_compat
cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
el0_svc_compat:
gic_prio_kentry_setup tmp=x1
mov x0, sp mov x0, sp
bl el0_svc_compat_handler bl el0_sync_compat_handler
b ret_to_user b ret_to_user
ENDPROC(el0_sync)
.align 6 .align 6
el0_irq_compat: el0_irq_compat:
...@@ -788,139 +668,7 @@ el0_irq_compat: ...@@ -788,139 +668,7 @@ el0_irq_compat:
el0_error_compat: el0_error_compat:
kernel_entry 0, 32 kernel_entry 0, 32
b el0_error_naked b el0_error_naked
el0_cp15:
/*
* Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
*/
ct_user_exit_irqoff
enable_daif
mov x0, x25
mov x1, sp
bl do_cp15instr
b ret_to_user
#endif
el0_da:
/*
* Data abort handling
*/
mrs x26, far_el1
ct_user_exit_irqoff
enable_daif
untagged_addr x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
b ret_to_user
el0_ia:
/*
* Instruction abort handling
*/
mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
mov x0, x26
mov x1, x25
mov x2, sp
bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
ct_user_exit_irqoff
enable_daif
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
b ret_to_user
el0_sve_acc:
/*
* Scalable Vector Extension access
*/
ct_user_exit_irqoff
enable_daif
mov x0, x25
mov x1, sp
bl do_sve_acc
b ret_to_user
el0_fpsimd_exc:
/*
* Floating Point, Advanced SIMD or SVE exception
*/
ct_user_exit_irqoff
enable_daif
mov x0, x25
mov x1, sp
bl do_fpsimd_exc
b ret_to_user
el0_sp:
ldr x26, [sp, #S_SP]
b el0_sp_pc
el0_pc:
mrs x26, far_el1
el0_sp_pc:
/*
* Stack or PC alignment exception handling
*/
gic_prio_kentry_setup tmp=x0
ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif #endif
mov x0, x26
mov x1, x25
mov x2, sp
bl do_sp_pc_abort
b ret_to_user
el0_undef:
/*
* Undefined instruction
*/
ct_user_exit_irqoff
enable_daif
mov x0, sp
bl do_undefinstr
b ret_to_user
el0_sys:
/*
* System instructions, for trapped cache maintenance instructions
*/
ct_user_exit_irqoff
enable_daif
mov x0, x25
mov x1, sp
bl do_sysinstr
b ret_to_user
el0_dbg:
/*
* Debug exception handling
*/
tbnz x24, #0, el0_inv // EL0 only
mrs x24, far_el1
gic_prio_kentry_setup tmp=x3
ct_user_exit_irqoff
mov x0, x24
mov x1, x25
mov x2, sp
bl do_debug_exception
enable_da_f
b ret_to_user
el0_inv:
ct_user_exit_irqoff
enable_daif
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
bl bad_el0_sync
b ret_to_user
ENDPROC(el0_sync)
.align 6 .align 6
el0_irq: el0_irq:
...@@ -999,17 +747,6 @@ finish_ret_to_user: ...@@ -999,17 +747,6 @@ finish_ret_to_user:
kernel_exit 0 kernel_exit 0
ENDPROC(ret_to_user) ENDPROC(ret_to_user)
/*
* SVC handler.
*/
.align 6
el0_svc:
gic_prio_kentry_setup tmp=x1
mov x0, sp
bl el0_svc_handler
b ret_to_user
ENDPROC(el0_svc)
.popsection // .entry.text .popsection // .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
......
...@@ -920,7 +920,7 @@ void fpsimd_release_task(struct task_struct *dead_task) ...@@ -920,7 +920,7 @@ void fpsimd_release_task(struct task_struct *dead_task)
* would have disabled the SVE access trap for userspace during * would have disabled the SVE access trap for userspace during
* ret_to_user, making an SVE access trap impossible in that case. * ret_to_user, making an SVE access trap impossible in that case.
*/ */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{ {
/* Even if we chose not to use SVE, the hardware could still trap: */ /* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
...@@ -947,7 +947,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) ...@@ -947,7 +947,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
/* /*
* Trapped FP/ASIMD access. * Trapped FP/ASIMD access.
*/ */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{ {
/* TODO: implement lazy context saving/restoring */ /* TODO: implement lazy context saving/restoring */
WARN_ON(1); WARN_ON(1);
...@@ -956,7 +956,7 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) ...@@ -956,7 +956,7 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
/* /*
* Raise a SIGFPE for the current process. * Raise a SIGFPE for the current process.
*/ */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{ {
unsigned int si_code = FPE_FLTUNK; unsigned int si_code = FPE_FLTUNK;
......
...@@ -455,10 +455,6 @@ int __init arch_populate_kprobe_blacklist(void) ...@@ -455,10 +455,6 @@ int __init arch_populate_kprobe_blacklist(void)
(unsigned long)__irqentry_text_end); (unsigned long)__irqentry_text_end);
if (ret) if (ret)
return ret; return ret;
ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
(unsigned long)__exception_text_end);
if (ret)
return ret;
ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start, ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
(unsigned long)__idmap_text_end); (unsigned long)__idmap_text_end);
if (ret) if (ret)
......
...@@ -154,14 +154,14 @@ static inline void sve_user_discard(void) ...@@ -154,14 +154,14 @@ static inline void sve_user_discard(void)
sve_user_disable(); sve_user_disable();
} }
asmlinkage void el0_svc_handler(struct pt_regs *regs) void el0_svc_handler(struct pt_regs *regs)
{ {
sve_user_discard(); sve_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
} }
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
asmlinkage void el0_svc_compat_handler(struct pt_regs *regs) void el0_svc_compat_handler(struct pt_regs *regs)
{ {
el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
compat_sys_call_table); compat_sys_call_table);
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/insn.h> #include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/stack_pointer.h> #include <asm/stack_pointer.h>
...@@ -393,7 +394,7 @@ void arm64_notify_segfault(unsigned long addr) ...@@ -393,7 +394,7 @@ void arm64_notify_segfault(unsigned long addr)
force_signal_inject(SIGSEGV, code, addr); force_signal_inject(SIGSEGV, code, addr);
} }
asmlinkage void __exception do_undefinstr(struct pt_regs *regs) void do_undefinstr(struct pt_regs *regs)
{ {
/* check for AArch32 breakpoint instructions */ /* check for AArch32 breakpoint instructions */
if (!aarch32_break_handler(regs)) if (!aarch32_break_handler(regs))
...@@ -405,6 +406,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) ...@@ -405,6 +406,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
BUG_ON(!user_mode(regs)); BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
} }
NOKPROBE_SYMBOL(do_undefinstr);
#define __user_cache_maint(insn, address, res) \ #define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \ if (address >= user_addr_max()) { \
...@@ -676,7 +678,7 @@ static const struct sys64_hook cp15_64_hooks[] = { ...@@ -676,7 +678,7 @@ static const struct sys64_hook cp15_64_hooks[] = {
{}, {},
}; };
asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{ {
const struct sys64_hook *hook, *hook_base; const struct sys64_hook *hook, *hook_base;
...@@ -714,9 +716,10 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) ...@@ -714,9 +716,10 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
*/ */
do_undefinstr(regs); do_undefinstr(regs);
} }
NOKPROBE_SYMBOL(do_cp15instr);
#endif #endif
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{ {
const struct sys64_hook *hook; const struct sys64_hook *hook;
...@@ -733,6 +736,7 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) ...@@ -733,6 +736,7 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
*/ */
do_undefinstr(regs); do_undefinstr(regs);
} }
NOKPROBE_SYMBOL(do_sysinstr);
static const char *esr_class_str[] = { static const char *esr_class_str[] = {
[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC", [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
...@@ -802,7 +806,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) ...@@ -802,7 +806,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
* bad_el0_sync handles unexpected, but potentially recoverable synchronous * bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0. Unlike bad_mode, this returns. * exceptions taken from EL0. Unlike bad_mode, this returns.
*/ */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{ {
void __user *pc = (void __user *)instruction_pointer(regs); void __user *pc = (void __user *)instruction_pointer(regs);
......
...@@ -111,9 +111,6 @@ SECTIONS ...@@ -111,9 +111,6 @@ SECTIONS
} }
.text : { /* Real text segment */ .text : { /* Real text segment */
_stext = .; /* Text and read-only data */ _stext = .; /* Text and read-only data */
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
IRQENTRY_TEXT IRQENTRY_TEXT
SOFTIRQENTRY_TEXT SOFTIRQENTRY_TEXT
ENTRY_TEXT ENTRY_TEXT
......
...@@ -32,7 +32,8 @@ ...@@ -32,7 +32,8 @@
#include <asm/daifflags.h> #include <asm/daifflags.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/kasan.h> #include <asm/kprobes.h>
#include <asm/processor.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
#include <asm/system_misc.h> #include <asm/system_misc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -101,18 +102,6 @@ static void mem_abort_decode(unsigned int esr) ...@@ -101,18 +102,6 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr); data_abort_decode(esr);
} }
static inline bool is_ttbr0_addr(unsigned long addr)
{
/* entry assembly clears tags for TTBR0 addrs */
return addr < TASK_SIZE;
}
static inline bool is_ttbr1_addr(unsigned long addr)
{
/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm) static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{ {
/* Either init_pg_dir or swapper_pg_dir */ /* Either init_pg_dir or swapper_pg_dir */
...@@ -736,8 +725,7 @@ static const struct fault_info fault_info[] = { ...@@ -736,8 +725,7 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
}; };
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
struct pt_regs *regs)
{ {
const struct fault_info *inf = esr_to_fault_info(esr); const struct fault_info *inf = esr_to_fault_info(esr);
...@@ -753,43 +741,21 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, ...@@ -753,43 +741,21 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die(inf->name, regs, arm64_notify_die(inf->name, regs,
inf->sig, inf->code, (void __user *)addr, esr); inf->sig, inf->code, (void __user *)addr, esr);
} }
NOKPROBE_SYMBOL(do_mem_abort);
asmlinkage void __exception do_el0_irq_bp_hardening(void) void do_el0_irq_bp_hardening(void)
{ {
/* PC has already been checked in entry.S */ /* PC has already been checked in entry.S */
arm64_apply_bp_hardening(); arm64_apply_bp_hardening();
} }
NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
unsigned int esr,
struct pt_regs *regs)
{
/*
* We've taken an instruction abort from userspace and not yet
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
if (!is_ttbr0_addr(addr))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(addr, esr, regs);
}
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{ {
if (user_mode(regs)) {
if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
}
arm64_notify_die("SP/PC alignment exception", regs, arm64_notify_die("SP/PC alignment exception", regs,
SIGBUS, BUS_ADRALN, (void __user *)addr, esr); SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
} }
NOKPROBE_SYMBOL(do_sp_pc_abort);
int __init early_brk64(unsigned long addr, unsigned int esr, int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs); struct pt_regs *regs);
...@@ -872,8 +838,7 @@ NOKPROBE_SYMBOL(debug_exception_exit); ...@@ -872,8 +838,7 @@ NOKPROBE_SYMBOL(debug_exception_exit);
#ifdef CONFIG_ARM64_ERRATUM_1463225 #ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
static int __exception static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{ {
if (user_mode(regs)) if (user_mode(regs))
return 0; return 0;
...@@ -892,15 +857,14 @@ cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) ...@@ -892,15 +857,14 @@ cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
return 1; return 1;
} }
#else #else
static int __exception static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{ {
return 0; return 0;
} }
#endif /* CONFIG_ARM64_ERRATUM_1463225 */ #endif /* CONFIG_ARM64_ERRATUM_1463225 */
NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler);
asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
unsigned int esr,
struct pt_regs *regs) struct pt_regs *regs)
{ {
const struct fault_info *inf = esr_to_debug_fault_info(esr); const struct fault_info *inf = esr_to_debug_fault_info(esr);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment