Commit 8d66772e authored by James Morse's avatar James Morse Committed by Will Deacon

arm64: Mask all exceptions during kernel_exit

To take RAS Exceptions as quickly as possible we need to keep SError
unmasked as much as possible. We need to mask it during kernel_exit
as taking an error from this code will overwrite the exception-registers.

Adding a naked 'disable_daif' to kernel_exit causes a performance problem
for micro-benchmarks that do no real work (e.g. calling getpid() in a
loop). This is because the ret_to_user loop has already masked IRQs so
that the TIF_WORK_MASK thread flags can't change underneath it, adding
disable_daif is an additional self-synchronising operation.

In the future, the RAS APEI code may need to modify the TIF_WORK_MASK
flags from an SError, in which case the ret_to_user loop must mask SError
while it examines the flags.

Disable all exceptions for return to EL1. For return to EL0 get the
ret_to_user loop to leave all exceptions masked once it has done its
work, this avoids an extra pstate-write.
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 41bd5b5d
@@ -221,6 +221,8 @@ alternative_else_nop_endif
 	.macro	kernel_exit, el
 	.if	\el != 0
+	disable_daif
+
 	/* Restore the task's original addr_limit. */
 	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
 	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
@@ -517,8 +519,6 @@ el1_da:
 	mov	x2, sp				// struct pt_regs
 	bl	do_mem_abort
 
-	// disable interrupts before pulling preserved data off the stack
-	disable_irq
 	kernel_exit 1
 el1_sp_pc:
 	/*
@@ -793,7 +793,7 @@ ENDPROC(el0_irq)
  * and this includes saving x0 back into the kernel stack.
  */
 ret_fast_syscall:
-	disable_irq				// disable interrupts
+	disable_daif
 	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
@@ -803,7 +803,7 @@ ret_fast_syscall:
 	enable_step_tsk x1, x2
 	kernel_exit 0
 ret_fast_syscall_trace:
-	enable_irq				// enable interrupts
+	enable_daif
 	b	__sys_trace_return_skipped	// we already saved x0

 /*
@@ -821,7 +821,7 @@ work_pending:
  * "slow" syscall return path.
  */
 ret_to_user:
-	disable_irq				// disable interrupts
+	disable_daif
 	ldr	x1, [tsk, #TSK_TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
@@ -31,6 +31,7 @@
 #include <linux/ratelimit.h>
 #include <linux/syscalls.h>

+#include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -756,9 +757,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		addr_limit_user_check();

 		if (thread_flags & _TIF_NEED_RESCHED) {
+			/* Unmask Debug and SError for the next task */
+			local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
 			schedule();
 		} else {
-			local_irq_enable();
+			local_daif_restore(DAIF_PROCCTX);

 			if (thread_flags & _TIF_UPROBE)
 				uprobe_notify_resume(regs);
@@ -775,7 +779,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 			fpsimd_restore_current_state();
 		}

-		local_irq_disable();
+		local_daif_mask();
 		thread_flags = READ_ONCE(current_thread_info()->flags);
 	} while (thread_flags & _TIF_WORK_MASK);
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment