Commit c28ac96b authored by Russell King

[ARM] Optimise kernel->userspace exit code a little.

This improves the return to user space from interrupts and exceptions
by a few cycles.  We change the requirements for *_restore_user_regs
slightly (they must now be entered with IRQs disabled) so we can
remove a few instructions.  We also re-shuffle the code which handles
pending work.
parent 2dc559c8
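
In outline: the open-coded set_cpsr_c sequences become disable_irq and
enable_irq macros, and the IRQ masking is hoisted out of the user
register restore macros into their callers, which is where the saved
instructions come from.  A rough sketch of the effect on the fast
return path (register usage as in the patch; not a literal excerpt
from the tree):

	@ before: fast_restore_user_regs masked IRQs itself
	mov	ip, #PSR_I_BIT | MODE_SVC
	msr	cpsr_c, ip			@ disable IRQs
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	...

	@ after: the caller has already run "disable_irq r1", so the
	@ macro starts straight at the PSR load
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	...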
@@ -725,7 +725,7 @@ __dabt_svc:	sub	sp, sp, #S_FRAME_SIZE
 	msr	cpsr_c, r9
 	mov	r2, sp
 	bl	do_DataAbort
-	set_cpsr_c r0, #PSR_I_BIT | MODE_SVC
+	disable_irq r0
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
@@ -776,9 +776,9 @@ svc_preempt:	teq	r9, #0		@ was preempt count = 0
 	movne	pc, lr
 	mov	r7, #PREEMPT_ACTIVE
 	str	r7, [r8, #TI_PREEMPT]		@ set PREEMPT_ACTIVE
-1:	set_cpsr_c r2, #MODE_SVC		@ enable IRQs
+1:	enable_irq r2				@ enable IRQs
 	bl	schedule
-	set_cpsr_c r0, #PSR_I_BIT | MODE_SVC	@ disable IRQs
+	disable_irq r0				@ disable IRQs
 	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
 	beq	preempt_return			@ go again
@@ -801,7 +801,7 @@ __und_svc:	sub	sp, sp, #S_FRAME_SIZE
 	mov	r0, sp				@ struct pt_regs *regs
 	bl	do_undefinstr
-1:	set_cpsr_c r0, #PSR_I_BIT | MODE_SVC
+1:	disable_irq r0
 	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
 	msr	spsr, lr
 	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
@@ -822,7 +822,7 @@ __pabt_svc:	sub	sp, sp, #S_FRAME_SIZE
 	mov	r0, r2				@ address (pc)
 	mov	r1, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
-	set_cpsr_c r0, #PSR_I_BIT | MODE_SVC
+	disable_irq r0
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
@@ -861,7 +861,7 @@ __dabt_usr:	sub	sp, sp, #S_FRAME_SIZE	@ Allocate frame size in one go
 #else
 	bl	CPU_ABORT_HANDLER
 #endif
-	set_cpsr_c r2, #MODE_SVC		@ Enable interrupts
+	enable_irq r2				@ Enable interrupts
 	mov	r2, sp
 	adrsvc	al, lr, ret_from_exception
 	b	do_DataAbort
@@ -916,7 +916,7 @@ __und_usr:	sub	sp, sp, #S_FRAME_SIZE	@ Allocate frame size in one go
 	adrsvc	al, r9, ret_from_exception	@ r9 = normal FP return
 	adrsvc	al, lr, fpundefinstr		@ lr = undefined instr return
-call_fpe:	set_cpsr_c r0, #MODE_SVC	@ Enable interrupts
+call_fpe:	enable_irq r0			@ Enable interrupts
 	get_thread_info r10			@ get current thread
 	ldr	r4, [r10, #TI_TASK]		@ get current task
 	mov	r8, #1
@@ -939,7 +939,7 @@ __pabt_usr:	sub	sp, sp, #S_FRAME_SIZE	@ Allocate frame size in one go
 	stmdb	r8, {sp, lr}^			@ Save sp_usr lr_usr
 	alignment_trap r4, r7, __temp_abt
 	zero_fp
-	set_cpsr_c r0, #MODE_SVC		@ Enable interrupts
+	enable_irq r0				@ Enable interrupts
 	mov	r0, r5				@ address (pc)
 	mov	r1, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
@@ -35,18 +35,27 @@ ENTRY(__do_softirq)
  * stack.
  */
 ret_fast_syscall:
-	set_cpsr_c r1, #PSR_I_BIT | MODE_SVC	@ disable interrupts
+	disable_irq r1				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
-	bne	ret_fast_work
+	bne	fast_work_pending
 	fast_restore_user_regs
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-ret_fast_work:
-	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
-	b	work_pending
+fast_work_pending:
+	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
+work_pending:
+	tst	r1, #_TIF_NEED_RESCHED
+	bne	work_resched
+	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
+	beq	no_work_pending
+	mov	r0, sp				@ 'regs'
+	mov	r2, why				@ 'syscall'
+	bl	do_notify_resume
+	disable_irq r1				@ disable interrupts
+	b	no_work_pending
 
 work_resched:
 	bl	schedule
@@ -55,22 +64,12 @@ work_resched:
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-	set_cpsr_c r1, #PSR_I_BIT | MODE_SVC	@ disable interrupts
+	disable_irq r1				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
-	beq	no_work_pending
-work_pending:
-	tst	r1, #_TIF_NEED_RESCHED
-	bne	work_resched
-	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
-	blne	__do_notify_resume
+	bne	work_pending
 no_work_pending:
-	restore_user_regs
-
-__do_notify_resume:
-	mov	r0, sp				@ 'regs'
-	mov	r2, why				@ 'syscall'
-	b	do_notify_resume		@ note the bl above sets lr
+	slow_restore_user_regs
 
 /*
  * This is how we return from a fork.
@@ -80,9 +79,9 @@ ENTRY(ret_from_fork)
 	bl	schedule_tail
 #endif
 	get_thread_info tsk
-	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
+	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	mov	why, #1
-	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
+	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
 	beq	ret_slow_syscall
 	mov	r1, sp
 	mov	r0, #1				@ trace exit [IP = 1]
@@ -134,7 +133,7 @@ ENTRY(vector_swi)
 	ldr	ip, [ip]
 	mcr	p15, 0, ip, c1, c0		@ update control register
 #endif
-	enable_irqs ip
+	enable_irq ip
 	str	r4, [sp, #-S_OFF]!		@ push fifth arg
@@ -11,13 +11,13 @@
 #define MODE_SVC 0x13
 #endif
 
-	.macro	zero_fp
+	.macro	zero_fp
 #ifndef CONFIG_NO_FRAME_POINTER
-	mov	fp, #0
+	mov	fp, #0
 #endif
-	.endm
+	.endm
 
-	.text
+	.text
 
 @ Bad Abort numbers
 @ -----------------
@@ -69,119 +69,146 @@
 #define S_OFF 8
 
 #ifdef CONFIG_CPU_32
-	.macro	save_user_regs
-	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ Calling r0 - r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^			@ Calling sp, lr
-	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
-	str	lr, [sp, #S_PC]			@ Save calling PC
-	str	r8, [sp, #S_PSR]		@ Save CPSR
-	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
-	.endm
-
-	.macro	restore_user_regs
-	ldr	r0, [sp, #S_PSR]		@ Get calling cpsr
-	mov	ip, #PSR_I_BIT | MODE_SVC
-	msr	cpsr_c, ip			@ disable IRQs
-	msr	spsr, r0			@ save in spsr_svc
-	ldr	lr, [sp, #S_PC]			@ Get PC
-	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-	.macro	fast_restore_user_regs
-	mov	ip, #PSR_I_BIT | MODE_SVC
-	msr	cpsr_c, ip			@ disable IRQs
-	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
-	msr	spsr, r1			@ save in spsr_svc
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-	.macro	mask_pc, rd, rm
-	.endm
-
-	.macro	enable_irqs, temp
-	mov	\temp, #MODE_SVC
-	msr	cpsr_c, \temp
-	.endm
-
-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
-/*
- * Like adr, but force SVC mode (if required)
- */
-	.macro	adrsvc, cond, reg, label
-	adr\cond	\reg, \label
-	.endm
-
-	.macro	alignment_trap, rbase, rtemp, sym
+	.macro	set_cpsr_c, reg, mode
+#if 1
+	/* broken binutils */
+	mov	\reg, \mode
+	msr	cpsr_c, \reg
+#else
+	msr	cpsr_c, \mode
+#endif
+	.endm
+
+	.macro	disable_irq, temp
+	set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
+	.endm
+
+	.macro	enable_irq, temp
+	set_cpsr_c \temp, #MODE_SVC
+	.endm
+
+	.macro	save_user_regs
+	sub	sp, sp, #S_FRAME_SIZE
+	stmia	sp, {r0 - r12}			@ Calling r0 - r12
+	add	r8, sp, #S_PC
+	stmdb	r8, {sp, lr}^			@ Calling sp, lr
+	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
+	str	lr, [sp, #S_PC]			@ Save calling PC
+	str	r8, [sp, #S_PSR]		@ Save CPSR
+	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+	.endm
+
+	.macro	restore_user_regs
+	ldr	r1, [sp, #S_PSR]		@ Get calling cpsr
+	disable_irq ip				@ disable IRQs
+	ldr	lr, [sp, #S_PC]!		@ Get PC
+	msr	spsr, r1			@ save in spsr_svc
+	ldmdb	sp, {r0 - lr}^			@ Get calling r0 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+/*
+ * Must be called with IRQs already disabled.
+ */
+	.macro	fast_restore_user_regs
+	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
+	msr	spsr, r1			@ save in spsr_svc
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+/*
+ * Must be called with IRQs already disabled.
+ */
+	.macro	slow_restore_user_regs
+	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
+	ldr	lr, [sp, #S_PC]!		@ get pc
+	msr	spsr, r1			@ save in spsr_svc
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	mask_pc, rd, rm
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+
+/*
+ * Like adr, but force SVC mode (if required)
+ */
+	.macro	adrsvc, cond, reg, label
+	adr\cond	\reg, \label
+	.endm
+
+	.macro	alignment_trap, rbase, rtemp, sym
 #ifdef CONFIG_ALIGNMENT_TRAP
 #define OFF_CR_ALIGNMENT(x)	cr_alignment - x
-	ldr	\rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
-	mcr	p15, 0, \rtemp, c1, c0
+	ldr	\rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+	mcr	p15, 0, \rtemp, c1, c0
 #endif
-	.endm
+	.endm
 #else
-	.macro	save_user_regs
-	str	r0, [sp, #-4]!
-	str	lr, [sp, #-4]!
-	sub	sp, sp, #15*4
-	stmia	sp, {r0 - lr}^
-	mov	r0, r0
-	.endm
-
-	.macro	restore_user_regs
-	ldmia	sp, {r0 - lr}^
-	mov	r0, r0
-	ldr	lr, [sp, #15*4]
-	add	sp, sp, #15*4+8
-	movs	pc, lr
-	.endm
-
-	.macro	fast_restore_user_regs
-	add	sp, sp, #S_OFF
-	ldmib	sp, {r1 - lr}^
-	mov	r0, r0
-	ldr	lr, [sp, #15*4]
-	add	sp, sp, #15*4+8
-	movs	pc, lr
-	.endm
-
-	.macro	mask_pc, rd, rm
-	bic	\rd, \rm, #PCMASK
-	.endm
-
-	.macro	enable_irqs, temp
-	teqp	pc, #0x00000003
-	.endm
-
-	.macro	initialise_traps_extra
-	.endm
-
-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
-/*
- * Like adr, but force SVC mode (if required)
- */
-	.macro	adrsvc, cond, reg, label
-	adr\cond	\reg, \label
-	orr\cond	\reg, \reg, #0x08000003
-	.endm
+	.macro	save_user_regs
+	str	r0, [sp, #-4]!
+	str	lr, [sp, #-4]!
+	sub	sp, sp, #15*4
+	stmia	sp, {r0 - lr}^
+	mov	r0, r0
+	.endm
+
+	.macro	restore_user_regs
+	ldmia	sp, {r0 - lr}^
+	mov	r0, r0
+	ldr	lr, [sp, #15*4]
+	add	sp, sp, #15*4+8
+	movs	pc, lr
+	.endm
+
+	.macro	fast_restore_user_regs
+	add	sp, sp, #S_OFF
+	ldmib	sp, {r1 - lr}^
+	mov	r0, r0
+	ldr	lr, [sp, #15*4]
+	add	sp, sp, #15*4+8
+	movs	pc, lr
+	.endm
+
+	.macro	mask_pc, rd, rm
+	bic	\rd, \rm, #PCMASK
+	.endm
+
+	.macro	enable_irqs, temp
+	teqp	pc, #0x00000003
+	.endm
+
+	.macro	initialise_traps_extra
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+
+/*
+ * Like adr, but force SVC mode (if required)
+ */
+	.macro	adrsvc, cond, reg, label
+	adr\cond	\reg, \label
+	orr\cond	\reg, \reg, #0x08000003
+	.endm
 #endif
@@ -215,13 +242,3 @@ tsk	.req	r9		@ current thread_info
 	ldr	scno, [lr, #-4]		@ get SWI instruction
 #endif
 	.endm
-
-	.macro	set_cpsr_c, reg, mode
-#if 1
-	mov	\reg, \mode
-	msr	cpsr_c, \reg
-#else
-	msr	cpsr_c, \mode
-#endif
-	.endm