Commit 238962ac authored by Will Deacon, committed by Russell King

ARM: 8191/1: decompressor: ensure I-side picks up relocated code

To speed up decompression, the decompressor sets up a flat, cacheable
mapping of memory. However, when there is insufficient space to hold
the page tables for this mapping, we don't bother to enable the caches
and subsequently skip all the cache maintenance hooks.

Skipping the cache maintenance before jumping to the relocated code
allows the processor to predict the branch and populate the I-cache
with stale data before the relocation loop has completed (since a
bootloader may have SCTLR.I set, which permits normal, cacheable
instruction fetches regardless of SCTLR.M).

This patch moves the cache maintenance check into the maintenance
routines themselves, allowing the v6/v7 versions to invalidate the
I-cache regardless of the MMU state.

Cc: <stable@vger.kernel.org>
Reported-by: Marc Carino <marc.ceeeee@gmail.com>
Tested-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 9ff0bb5b
@@ -397,8 +397,7 @@ dtb_check_done:
 		add	sp, sp, r6
 #endif
 
-		tst	r4, #1
-		bleq	cache_clean_flush
+		bl	cache_clean_flush
 
 		adr	r0, BSYM(restart)
 		add	r0, r0, r6
@@ -1047,6 +1046,8 @@ cache_clean_flush:
 		b	call_cache_fn
 
 __armv4_mpu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r2, #1
 		mov	r3, #0
 		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
@@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush:
 		mov	pc, lr
 
 __fa526_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
 		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
@@ -1072,13 +1075,16 @@ __fa526_cache_flush:
 
 __armv6_mmu_cache_flush:
 		mov	r1, #0
-		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
+		tst	r4, #1
+		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
 		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
-		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
+		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
 		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
 		mov	pc, lr
 
 __armv7_mmu_cache_flush:
+		tst	r4, #1
+		bne	iflush
 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
 		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
 		mov	r10, #0
@@ -1139,6 +1145,8 @@ iflush:
 		mov	pc, lr
 
 __armv5tej_mmu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
 		bne	1b
 		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
@@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush:
 		mov	pc, lr
 
 __armv4_mmu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r2, #64*1024		@ default: 32K dcache size (*2)
 		mov	r11, #32		@ default: 32 byte line size
 		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
@@ -1179,6 +1189,8 @@ no_cache_id:
 
 __armv3_mmu_cache_flush:
 __armv3_mpu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment