Commit 1a7f67e6 authored by Catalin Marinas

Merge branch 'for-next/entry' into for-next/core

* for-next/entry:
  : More entry.S clean-ups and conversion to C.
  arm64: entry: call exit_to_user_mode() from C
  arm64: entry: move bulk of ret_to_user to C
  arm64: entry: clarify entry/exit helpers
  arm64: entry: consolidate entry/exit helpers
parents 622909e5 e130338e
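
Taken together, the branch removes the assembly "work pending" slow path in ret_to_user and the asmlinkage enter_from_user_mode()/exit_to_user_mode() entry points: every EL0 exception handler now brackets its body with C helpers, and pending-work handling (do_notify_resume()) moves into prepare_exit_to_user_mode(), which entry.S reaches via asm_exit_to_user_mode(). The stand-alone C program below is only a sketch of that calling pattern; the stub bodies, the flag value and the printf output are illustrative and are not part of the kernel code.

#include <stdio.h>

struct pt_regs { unsigned long pc; };

#define _TIF_WORK_MASK	0xfUL

static unsigned long thread_flags = 0x2UL;	/* pretend a signal is pending */

static void local_daif_mask(void)    { /* stub: mask interrupts */ }
static void local_daif_restore(void) { /* stub: unmask interrupts */ }

static void do_notify_resume(struct pt_regs *regs, unsigned long flags)
{
	(void)regs;
	printf("do_notify_resume: handling pending work 0x%lx\n", flags);
	thread_flags = 0;			/* work consumed */
}

/* Models __enter_from_user_mode()/enter_from_user_mode() from the diff. */
static void enter_from_user_mode(struct pt_regs *regs)
{
	(void)regs;
	printf("enter: context tracking + irqflag tracing happen here\n");
}

/* Models prepare_exit_to_user_mode()/exit_to_user_mode() from the diff. */
static void exit_to_user_mode(struct pt_regs *regs)
{
	local_daif_mask();
	if (thread_flags & _TIF_WORK_MASK)
		do_notify_resume(regs, thread_flags);
	printf("exit: context tracking + irqflag tracing happen here\n");
}

/* Every EL0 handler now brackets its body with the two helpers. */
static void el0_example(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore();
	printf("handle the exception\n");
	exit_to_user_mode(regs);
}

int main(void)
{
	struct pt_regs regs = { 0 };

	el0_example(&regs);
	return 0;
}

The real helpers in the diff below are noinstr/__always_inline and run with DAIF masked; the sketch only preserves the order of operations.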
@@ -55,8 +55,8 @@ asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
 				  void (*func)(struct pt_regs *));
-asmlinkage void enter_from_user_mode(void);
-asmlinkage void exit_to_user_mode(void);
+asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
@@ -73,6 +73,7 @@ void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
 void do_serror(struct pt_regs *regs, unsigned int esr);
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
 void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
 #endif /* __ASM_EXCEPTION_H */
@@ -26,10 +26,14 @@
 #include <asm/system_misc.h>

 /*
+ * Handle IRQ/context state management when entering from kernel mode.
+ * Before this function is called it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ *
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
  */
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
 {
 	regs->exit_rcu = false;
@@ -45,20 +49,26 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 	lockdep_hardirqs_off(CALLER_ADDR0);
 	rcu_irq_enter_check_tick();
 	trace_hardirqs_off_finish();
+}
+
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+	__enter_from_kernel_mode(regs);
 	mte_check_tfsr_entry();
 }

 /*
+ * Handle IRQ/context state management when exiting to kernel mode.
+ * After this function returns it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ *
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
  */
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
 {
 	lockdep_assert_irqs_disabled();

-	mte_check_tfsr_exit();
-
 	if (interrupts_enabled(regs)) {
 		if (regs->exit_rcu) {
 			trace_hardirqs_on_prepare();
@@ -75,6 +85,71 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 	}
 }

+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+	mte_check_tfsr_exit();
+	__exit_to_kernel_mode(regs);
+}
+
+/*
+ * Handle IRQ/context state management when entering from user mode.
+ * Before this function is called it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ */
+static __always_inline void __enter_from_user_mode(void)
+{
+	lockdep_hardirqs_off(CALLER_ADDR0);
+	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	user_exit_irqoff();
+	trace_hardirqs_off_finish();
+}
+
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+	__enter_from_user_mode();
+}
+
+/*
+ * Handle IRQ/context state management when exiting to user mode.
+ * After this function returns it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ */
+static __always_inline void __exit_to_user_mode(void)
+{
+	trace_hardirqs_on_prepare();
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	user_enter_irqoff();
+	lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	local_daif_mask();
+
+	flags = READ_ONCE(current_thread_info()->flags);
+	if (unlikely(flags & _TIF_WORK_MASK))
+		do_notify_resume(regs, flags);
+}
+
+static __always_inline void exit_to_user_mode(struct pt_regs *regs)
+{
+	prepare_exit_to_user_mode(regs);
+	mte_check_tfsr_exit();
+	__exit_to_user_mode();
+}
+
+asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
+{
+	exit_to_user_mode(regs);
+}
+
+/*
+ * Handle IRQ/context state management when entering an NMI from user/kernel
+ * mode. Before this function is called it is not safe to call regular kernel
+ * code, instrumentable code, or any code which may trigger an exception.
+ */
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -88,6 +163,11 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 	ftrace_nmi_enter();
 }

+/*
+ * Handle IRQ/context state management when exiting an NMI from user/kernel
+ * mode. After this function returns it is not safe to call regular kernel
+ * code, instrumentable code, or any code which may trigger an exception.
+ */
 static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 {
 	bool restore = regs->lockdep_hardirqs;
@@ -105,6 +185,40 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 	__nmi_exit();
 }

+/*
+ * Handle IRQ/context state management when entering a debug exception from
+ * kernel mode. Before this function is called it is not safe to call regular
+ * kernel code, instrumentable code, or any code which may trigger an exception.
+ */
+static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+{
+	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+	lockdep_hardirqs_off(CALLER_ADDR0);
+	rcu_nmi_enter();
+
+	trace_hardirqs_off_finish();
+}
+
+/*
+ * Handle IRQ/context state management when exiting a debug exception from
+ * kernel mode. After this function returns it is not safe to call regular
+ * kernel code, instrumentable code, or any code which may trigger an exception.
+ */
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+{
+	bool restore = regs->lockdep_hardirqs;
+
+	if (restore) {
+		trace_hardirqs_on_prepare();
+		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	}
+
+	rcu_nmi_exit();
+	if (restore)
+		lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
 static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
@@ -265,30 +379,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }

-static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
-{
-	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
-
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	rcu_nmi_enter();
-
-	trace_hardirqs_off_finish();
-}
-
-static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
-{
-	bool restore = regs->lockdep_hardirqs;
-
-	if (restore) {
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	}
-
-	rcu_nmi_exit();
-	if (restore)
-		lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
@@ -382,31 +472,14 @@ asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
 	arm64_exit_nmi(regs);
 }

-asmlinkage void noinstr enter_from_user_mode(void)
-{
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
-	trace_hardirqs_off_finish();
-}
-
-asmlinkage void noinstr exit_to_user_mode(void)
-{
-	mte_check_tfsr_exit();
-
-	trace_hardirqs_on_prepare();
-	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	user_enter_irqoff();
-	lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);

-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_mem_abort(far, esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
@@ -421,37 +494,42 @@ static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
 	if (!is_ttbr0_addr(far))
 		arm64_apply_bp_hardening();

-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_mem_abort(far, esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_fpsimd_acc(esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sve_acc(esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_fpsimd_exc(esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sysinstr(esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
@@ -461,37 +539,42 @@ static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
 	if (!is_ttbr0_addr(instruction_pointer(regs)))
 		arm64_apply_bp_hardening();

-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sp_pc_abort(far, esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sp_pc_abort(regs->sp, esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_undef(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_undefinstr(regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_bti(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_bti(regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	bad_el0_sync(regs, 0, esr);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
@@ -499,23 +582,26 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
 	unsigned long far = read_sysreg(far_el1);

-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	do_debug_exception(far, esr, regs);
 	local_daif_restore(DAIF_PROCCTX);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_svc(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
 	do_el0_svc(regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_ptrauth_fault(regs, esr);
+	exit_to_user_mode(regs);
 }

 asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
@@ -574,7 +660,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
 static void noinstr el0_interrupt(struct pt_regs *regs,
 				  void (*handler)(struct pt_regs *))
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);

 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
@@ -582,6 +668,8 @@ static void noinstr el0_interrupt(struct pt_regs *regs,
 		arm64_apply_bp_hardening();

 	do_interrupt_handler(regs, handler);
+
+	exit_to_user_mode(regs);
 }

 static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
@@ -608,12 +696,13 @@ static void noinstr __el0_error_handler_common(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);

-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_ERRCTX);
 	arm64_enter_nmi(regs);
 	do_serror(regs, esr);
 	arm64_exit_nmi(regs);
 	local_daif_restore(DAIF_PROCCTX);
+	exit_to_user_mode(regs);
 }

 asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
@@ -624,16 +713,18 @@ asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
 #ifdef CONFIG_COMPAT
 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_cp15instr(esr, regs);
+	exit_to_user_mode(regs);
 }

 static void noinstr el0_svc_compat(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
 	do_el0_svc_compat(regs);
+	exit_to_user_mode(regs);
 }

 asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
......
@@ -29,16 +29,6 @@
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>

-/*
- * Context tracking and irqflag tracing need to instrument transitions between
- * user and kernel mode.
- */
-	.macro user_enter_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-	bl	exit_to_user_mode
-#endif
-	.endm
-
 	.macro	clear_gp_regs
 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
 	mov	x\n, xzr
@@ -492,18 +482,6 @@ SYM_CODE_END(__swpan_exit_el0)
 /* GPRs used by entry code */
 tsk	.req	x28		// current thread_info

-/*
- * Interrupt handling.
- */
-	.macro	gic_prio_kentry_setup, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
-	msr_s	SYS_ICC_PMR_EL1, \tmp
-	alternative_else_nop_endif
-#endif
-	.endm
-
 	.text

 /*
@@ -605,35 +583,13 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
 	kernel_exit 1
 SYM_CODE_END(ret_to_kernel)

-/*
- * "slow" syscall return path.
- */
 SYM_CODE_START_LOCAL(ret_to_user)
-	disable_daif
-	gic_prio_kentry_setup tmp=x3
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
-	ldr	x19, [tsk, #TSK_TI_FLAGS]
-	and	x2, x19, #_TIF_WORK_MASK
-	cbnz	x2, work_pending
-finish_ret_to_user:
-	user_enter_irqoff
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
-
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-	mov	x0, sp				// 'regs'
-	mov	x1, x19
-	bl	do_notify_resume
-	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
-	b	finish_ret_to_user
 SYM_CODE_END(ret_to_user)

 	.popsection				// .entry.text
@@ -799,6 +755,8 @@ SYM_CODE_START(ret_from_fork)
 	mov	x0, x20
 	blr	x19
 1:	get_current_task tsk
+	mov	x0, sp
+	bl	asm_exit_to_user_mode
 	b	ret_to_user
 SYM_CODE_END(ret_from_fork)
 NOKPROBE(ret_from_fork)
......
@@ -929,8 +929,7 @@ static bool cpu_affinity_invalid(struct pt_regs *regs)
 					     system_32bit_el0_cpumask());
 }

-asmlinkage void do_notify_resume(struct pt_regs *regs,
-				 unsigned long thread_flags)
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 {
 	do {
 		if (thread_flags & _TIF_NEED_RESCHED) {
......