Commit 3302cadd authored by Russell King

ARM: entry: efficiency cleanups

Make the "fast" syscall return path fast again.  The addition of IRQ
tracing and context tracking has made this path grossly inefficient.
When these options are enabled, we can do much better by saving the
syscall return code on the stack - we then don't need to save a bunch
of registers around every single callout to C code.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 01e09a28
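
In outline, the change trades a single store of the syscall return value for the
register save/restore that previously bracketed every callout to C.  A minimal
sketch of the before/after pattern (illustrative only - it reuses the S_R0/S_OFF
offsets and the trace_hardirqs_on callout that appear in the diff below, and is
not the exact kernel code):

	@ Before: with IRQ tracing or context tracking enabled, each C call
	@ made from the return path had to preserve the syscall return value
	@ and the caller-clobbered registers around it:
	stmdb	sp!, {r0-r3, ip, lr}		@ save around the call...
	bl	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}		@ ...and restore afterwards

	@ After: the traced variant of ret_fast_syscall parks r0 in its
	@ pt_regs slot once, so later C calls may clobber r0-r3, ip and lr:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0 to the stack
	bl	trace_hardirqs_on		@ no save/restore bracketing needed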
arch/arm/include/asm/assembler.h
@@ -108,29 +108,37 @@
 	.endm
 #endif
 
-	.macro	asm_trace_hardirqs_off
+	.macro	asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
+	.if	\save
 	stmdb	sp!, {r0-r3, ip, lr}
+	.endif
 	bl	trace_hardirqs_off
+	.if	\save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro	asm_trace_hardirqs_on, cond=al
+	.macro	asm_trace_hardirqs_on, cond=al, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	/*
 	 * actually the registers should be pushed and pop'd conditionally, but
 	 * after bl the flags are certainly clobbered
 	 */
+	.if	\save
 	stmdb	sp!, {r0-r3, ip, lr}
+	.endif
 	bl\cond	trace_hardirqs_on
+	.if	\save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro	disable_irq
+	.macro	disable_irq, save=1
 	disable_irq_notrace
-	asm_trace_hardirqs_off
+	asm_trace_hardirqs_off \save
 	.endm
 
 	.macro	enable_irq
arch/arm/include/asm/thread_info.h
@@ -136,22 +136,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 
 /*
  * thread information flags:
- *  TIF_SYSCALL_TRACE	- syscall trace active
- *  TIF_SYSCAL_AUDIT	- syscall auditing active
- *  TIF_SIGPENDING	- signal pending
- *  TIF_NEED_RESCHED	- rescheduling necessary
- *  TIF_NOTIFY_RESUME	- callback before returning to user
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
-#define TIF_SIGPENDING		0
-#define TIF_NEED_RESCHED	1
+#define TIF_SIGPENDING		0	/* signal pending */
+#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
-#define TIF_UPROBE		7
-#define TIF_SYSCALL_TRACE	8
-#define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_TRACEPOINT	10
-#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_UPROBE		3	/* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
 
 	.align	5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
 /*
- * This is the fast syscall return path.  We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path.  We do as little as possible here,
+ * such as avoiding writing r0 to the stack.  We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
  */
 ret_fast_syscall:
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK
-	bne	__sys_trace_return
-	tst	r1, #_TIF_WORK_MASK
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	bne	fast_work_pending
-	asm_trace_hardirqs_on
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
-	ct_user_enter
 
 	restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
 
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+	/* Ok, we need to do extra processing, enter the slow path. */
 fast_work_pending:
 	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
-work_pending:
+	/* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled.  As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+	disable_irq_notrace			@ disable interrupts
+	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	beq	no_work_pending
+ UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
+
+	/* Slower path - fall through to work_pending */
+#endif
+
+	tst	r1, #_TIF_SYSCALL_WORK
+	bne	__sys_trace_return_nosave
+slow_work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_work_pending
@@ -64,16 +84,19 @@ work_pending:
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them.  Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
-	bne	work_pending
+	bne	slow_work_pending
 no_work_pending:
-	asm_trace_hardirqs_on
+	asm_trace_hardirqs_on save = 0
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
@@ -251,6 +274,12 @@ __sys_trace_return:
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
+__sys_trace_return_nosave:
+	asm_trace_hardirqs_off save=0
+	mov	r0, sp
+	bl	syscall_trace_exit
+	b	ret_slow_syscall
+
 	.align	5
 #ifdef CONFIG_ALIGNMENT_TRAP
 	.type	__cr_alignment, #object
arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+	/*
+	 * The assembly code enters us with IRQs off, but it hasn't
+	 * informed the tracing code of that for efficiency reasons.
+	 * Update the trace code with the current status.
+	 */
+	trace_hardirqs_off();
 	do {
 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
 			schedule();