Commit 1fcedc2b authored by Anton Blanchard, committed by Linus Torvalds

[PATCH] ppc64: exception path optimisations

- We were statically predicting that syscalls would be 32-bit, which meant every
  64-bit syscall was guaranteed to be mispredicted. Just let the hardware
  predict this one.

- We shouldn't use blrl for indirect function calls: it is unlikely to be
  predicted correctly and it corrupts the link prediction stack. We should
  use bctrl instead (see the sketch below).

- Statically predict a branch in the system call path, favouring calls from
  userspace.

- Remove the static prediction in the pagefault path; hardware prediction
  should do a better job here.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bf52c245
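
For context, a minimal sketch of the two idioms the commit message refers to (not part of the patch; the labels, registers and offsets here are hypothetical):

	# Static prediction hints: a '-' suffix tells the CPU to assume the
	# conditional branch is usually not taken, '+' that it usually is.
	# Without a suffix the hardware branch predictor decides.
	cmpdi	r3,0
	beq-	error_path		# hint: expect to fall through

	# Indirect calls should go through the count register. mtlr/blrl
	# pushes and pops the hardware link (return-address) stack
	# inconsistently; mtctr/bctrl leaves it balanced.
	ld	r10,0(r11)		# load function pointer
	mtctr	r10
	bctrl				# call; LR receives the return address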
@@ -132,7 +132,7 @@ system_call:		/* label this so stack traces look sane */
 	 */
 	ld	r11,.SYS_CALL_TABLE@toc(2)
 	andi.	r10,r10,_TIF_32BIT
-	beq-	15f
+	beq	15f
 	ld	r11,.SYS_CALL_TABLE32@toc(2)
 	clrldi	r3,r3,32
 	clrldi	r4,r4,32
@@ -143,8 +143,8 @@ system_call:		/* label this so stack traces look sane */
 15:
 	slwi	r0,r0,3
 	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
-	mtlr	r10
-	blrl			/* Call handler */
+	mtctr	r10
+	bctrl			/* Call handler */
 syscall_exit:
 #ifdef SHOW_SYSCALLS
@@ -182,7 +182,7 @@ syscall_exit_trace_cont:
 	stdcx.	r0,0,r1			/* to clear the reservation */
 	andi.	r6,r8,MSR_PR
 	ld	r4,_LINK(r1)
-	beq	1f			/* only restore r13 if */
+	beq-	1f			/* only restore r13 if */
 	ld	r13,GPR13(r1)		/* returning to usermode */
 1:	ld	r2,GPR2(r1)
 	ld	r1,GPR1(r1)
...
@@ -1028,7 +1028,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	bl	.local_irq_restore
 	b	11f
 #else
-	beq+	fast_exception_return	/* Return from exception on success */
+	beq	fast_exception_return	/* Return from exception on success */
 	/* fall through */
 #endif
...