Commit bf3f0f33 authored by Will Deacon, committed by Russell King

ARM: 7784/1: mm: ensure SMP alternates assemble to exactly 4 bytes with Thumb-2

Commit ae8a8b95 ("ARM: 7691/1: mm: kill unused TLB_CAN_READ_FROM_L1_CACHE
and use ALT_SMP instead") added early function returns for page table
cache flushing operations on ARMv7 SMP CPUs.

Unfortunately, when targeting Thumb-2, these `mov pc, lr' sequences
assemble to 2 bytes which can lead to corruption of the instruction
stream after code patching.

This patch fixes the alternates to use wide (32-bit) instructions for
Thumb-2, therefore ensuring that the patching code works correctly.

Cc: <stable@vger.kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent b6992fa9
...@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext) ...@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! ) ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 ) THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] ) THUMB( str r3, [r0] )
ALT_SMP(mov pc,lr) ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif #endif
mov pc, lr mov pc, lr
......
...@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext) ...@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0] 1: strd r2, r3, [r0]
ALT_SMP(mov pc, lr) ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif #endif
mov pc, lr mov pc, lr
......
...@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle) ...@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
ENDPROC(cpu_v7_do_idle) ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area) ENTRY(cpu_v7_dcache_clean_area)
ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
ALT_UP(W(nop)) ALT_UP_B(1f)
dcache_line_size r2, r3 mov pc, lr
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: dcache_line_size r2, r3
2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2 add r0, r0, r2
subs r1, r1, r2 subs r1, r1, r2
bhi 1b bhi 2b
dsb dsb
mov pc, lr mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area) ENDPROC(cpu_v7_dcache_clean_area)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment