Commit 7f9afa4f authored by Russell King

[ARM] Fix a collection of missed changes from cache API changes.

parent 452ae362
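For orientation, every hunk below makes the same kind of change: callers and per-CPU function tables that still referenced the old cache entry points (cpu_*_cache_clean_invalidate_*, cpu_*_icache_invalidate_*, cpu_*_dcache_*) are moved over to the new cache API (flush_icache_range(), the v3/v4wb cache function tables, and the __cpuc_* exports). Below is a minimal C sketch of the caller-side pattern, modelled on the first signal.c hunk; sync_trampoline() is a made-up helper name used only for illustration, not something this commit adds.

#include <asm/cacheflush.h>

/* was: cpu_icache_invalidate_range(start, end) with the old per-CPU API */
static void sync_trampoline(unsigned long *rc)
{
	/* make the return code written onto the stack visible to the I-cache */
	flush_icache_range((unsigned long)rc,
			   (unsigned long)(rc + 1));
}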
@@ -362,7 +362,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
* Ensure that the instruction cache sees
* the return code written onto the stack.
*/
cpu_icache_invalidate_range((unsigned long)rc,
flush_icache_range((unsigned long)rc,
(unsigned long)(rc + 1));
retcode = ((unsigned long)rc) + thumb;
@@ -570,6 +570,9 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
/* ldr pc, [sp], #12 */
put_user(0xe49df00c, &usp[2]);
flush_icache_range((unsigned long)usp,
(unsigned long)(usp + 3));
regs->ARM_pc = regs->ARM_sp + 4;
}
}
......
@@ -54,7 +54,7 @@ ENTRY(sa1100_cpu_suspend)
str r0, [r1]
@ clean data cache and invalidate WB
bl cpu_sa1100_cache_clean_invalidate_all
bl v4wb_flush_kern_cache_all
@ disable clock switching
mcr p15, 0, r1, c15, c2, 2
......
@@ -330,6 +330,7 @@ arm3_elf_name: .asciz "v2"
.long arm2_processor_functions
.long 0
.long 0
.long 0
.long 0x41560250
.long 0xfffffff0
@@ -342,6 +343,7 @@ arm3_elf_name: .asciz "v2"
.long arm250_processor_functions
.long 0
.long 0
.long 0
.long 0x41560300
.long 0xfffffff0
@@ -354,4 +356,5 @@ arm3_elf_name: .asciz "v2"
.long arm3_processor_functions
.long 0
.long 0
.long 0
@@ -16,178 +16,162 @@
#include <asm/constants.h>
#include <asm/procinfo.h>
/*
* Function: arm6_7_cache_clean_invalidate_all (void)
* : arm6_7_cache_clean_invalidate_page (unsigned long address, int size, int flags)
*
* Params : address Area start address
* : size size of area
* : flags b0 = I cache as well
*
* Purpose : Flush all cache lines
*/
ENTRY(cpu_arm6_cache_clean_invalidate_all)
ENTRY(cpu_arm7_cache_clean_invalidate_all)
ENTRY(cpu_arm6_cache_clean_invalidate_range)
ENTRY(cpu_arm7_cache_clean_invalidate_range)
ENTRY(cpu_arm6_icache_invalidate_range)
ENTRY(cpu_arm7_icache_invalidate_range)
ENTRY(cpu_arm6_icache_invalidate_page)
ENTRY(cpu_arm7_icache_invalidate_page)
ENTRY(cpu_arm6_dcache_clean_range)
ENTRY(cpu_arm7_dcache_clean_range)
ENTRY(cpu_arm6_dcache_invalidate_range)
ENTRY(cpu_arm7_dcache_invalidate_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush cache
ENTRY(cpu_arm6_dcache_clean_page)
ENTRY(cpu_arm7_dcache_clean_page)
ENTRY(cpu_arm6_dcache_clean_entry)
ENTRY(cpu_arm7_dcache_clean_entry)
ENTRY(cpu_arm6_dcache_clean_area)
ENTRY(cpu_arm7_dcache_clean_area)
mov pc, lr
/*
* Function: arm6_7_data_abort ()
*
* Params : r0 = address of aborted instruction
* Params : r2 = address of aborted instruction
* : sp = pointer to registers
*
* Purpose : obtain information about current aborted instruction
*
* Returns : r0 = address of abort
* : r1 != 0 if writing
* : r3 = FSR
* : sp = pointer to registers
* : r1 = FSR
*/
ENTRY(cpu_arm6_data_abort)
ldr r4, [r0] @ read instruction causing problem
tst r4, r4, lsr #21 @ C = bit 20
sbc r1, r1, r1 @ r1 = C - 1
and r2, r4, #14 << 24
teq r2, #8 << 24 @ was it ldm/stm
bne Ldata_simple
Ldata_ldmstm: tst r4, #1 << 21 @ check writeback bit
beq Ldata_simple
mov r7, #0x11
orr r7, r7, r7, lsl #8
and r0, r4, r7
and r2, r4, r7, lsl #1
add r0, r0, r2, lsr #1
and r2, r4, r7, lsl #2
add r0, r0, r2, lsr #2
and r2, r4, r7, lsl #3
add r0, r0, r2, lsr #3
add r0, r0, r0, lsr #8
add r0, r0, r0, lsr #4
and r7, r0, #15 @ r7 = no. of registers to transfer.
and r5, r4, #15 << 16 @ Get Rn
ldr r0, [sp, r5, lsr #14] @ Get register
tst r4, #1 << 23 @ U bit
subne r7, r0, r7, lsl #2
addeq r7, r0, r7, lsl #2 @ Do correction (signed)
Ldata_saver7: str r7, [sp, r5, lsr #14] @ Put register
Ldata_simple: mrc p15, 0, r0, c6, c0, 0 @ get FAR
mrc p15, 0, r3, c5, c0, 0 @ get FSR
and r3, r3, #255
mov pc, lr
ENTRY(cpu_arm7_data_abort)
ldr r4, [r0] @ read instruction causing problem
tst r4, r4, lsr #21 @ C = bit 20
sbc r1, r1, r1 @ r1 = C - 1
and r2, r4, #15 << 24
add pc, pc, r2, lsr #22 @ Now branch to the relevant processing routine
movs pc, lr
b Ldata_unknown
b Ldata_unknown
b Ldata_unknown
b Ldata_unknown
b Ldata_lateldrpostconst @ ldr rd, [rn], #m
b Ldata_lateldrpreconst @ ldr rd, [rn, #m] @ RegVal
b Ldata_lateldrpostreg @ ldr rd, [rn], rm
b Ldata_lateldrprereg @ ldr rd, [rn, rm]
b Ldata_ldmstm @ ldm*a rn, <rlist>
b Ldata_ldmstm @ ldm*b rn, <rlist>
b Ldata_unknown
b Ldata_unknown
b Ldata_simple @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
b Ldata_simple @ ldc rd, [rn, #m]
b Ldata_unknown
Ldata_unknown: @ Part of jumptable
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r8, [r0] @ read arm instruction
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes.
and r7, r8, #15 << 24
add pc, pc, r7, lsr #22 @ Now branch to the relevant processing routine
nop
/* 0 */ b .data_unknown
/* 1 */ mov pc, lr @ swp
/* 2 */ b .data_unknown
/* 3 */ b .data_unknown
/* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m
/* 5 */ b .data_arm_lateldrpreconst @ ldr rd, [rn, #m]
/* 6 */ b .data_arm_lateldrpostreg @ ldr rd, [rn], rm
/* 7 */ b .data_arm_lateldrprereg @ ldr rd, [rn, rm]
/* 8 */ b .data_arm_ldmstm @ ldm*a rn, <rlist>
/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist>
/* a */ b .data_unknown
/* b */ b .data_unknown
/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ mov pc, lr @ ldc rd, [rn, #m]
/* e */ b .data_unknown
/* f */
.data_unknown: @ Part of jumptable
mov r0, r2
mov r1, r4
mov r2, r3
mov r1, r8
mov r2, sp
bl baddataabort
b ret_from_exception
Ldata_lateldrpreconst:
tst r4, #1 << 21 @ check writeback bit
beq Ldata_simple
Ldata_lateldrpostconst:
movs r2, r4, lsl #20 @ Get offset
beq Ldata_simple
and r5, r4, #15 << 16 @ Get Rn
ldr r0, [sp, r5, lsr #14]
tst r4, #1 << 23 @ U bit
subne r7, r0, r2, lsr #20
addeq r7, r0, r2, lsr #20
b Ldata_saver7
Ldata_lateldrprereg:
tst r4, #1 << 21 @ check writeback bit
beq Ldata_simple
Ldata_lateldrpostreg:
and r5, r4, #15
ldr r2, [sp, r5, lsl #2] @ Get Rm
mov r3, r4, lsr #7
ands r3, r3, #31
and r6, r4, #0x70
orreq r6, r6, #8
add pc, pc, r6
mov r0, r0
mov r2, r2, lsl r3 @ 0: LSL #!0
b 1f
b 1f @ 1: LSL #0
mov r0, r0
b 1f @ 2: MUL?
mov r0, r0
b 1f @ 3: MUL?
mov r0, r0
mov r2, r2, lsr r3 @ 4: LSR #!0
b 1f
mov r2, r2, lsr #32 @ 5: LSR #32
b 1f
b 1f @ 6: MUL?
mov r0, r0
b 1f @ 7: MUL?
mov r0, r0
mov r2, r2, asr r3 @ 8: ASR #!0
b 1f
mov r2, r2, asr #32 @ 9: ASR #32
b 1f
b 1f @ A: MUL?
mov r0, r0
b 1f @ B: MUL?
mov r0, r0
mov r2, r2, ror r3 @ C: ROR #!0
b 1f
mov r2, r2, rrx @ D: RRX
b 1f
mov r0, r0 @ E: MUL?
mov r0, r0
mov r0, r0 @ F: MUL?
1: and r5, r4, #15 << 16 @ Get Rn
ldr r0, [sp, r5, lsr #14]
tst r4, #1 << 23 @ U bit
subne r7, r0, r2
addeq r7, r0, r2
b Ldata_saver7
ENTRY(cpu_arm6_data_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r8, [r2] @ read arm instruction
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes.
and r7, r8, #14 << 24
teq r7, #8 << 24 @ was it ldm/stm
movne pc, lr
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
moveq pc, lr @ no writeback -> no fixup
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
and r2, r8, r7, lsl #1
add r6, r6, r2, lsr #1
and r2, r8, r7, lsl #2
add r6, r6, r2, lsr #2
and r2, r8, r7, lsl #3
add r6, r6, r2, lsr #3
add r6, r6, r6, lsr #8
add r6, r6, r6, lsr #4
and r6, r6, #15 @ r6 = no. of registers to transfer.
and r5, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [sp, r5, lsr #14] @ Put register 'Rn'
mov pc, lr
.data_arm_apply_r6_and_rn:
and r5, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [sp, r5, lsr #14] @ Put register 'Rn'
mov pc, lr
.data_arm_lateldrpreconst:
tst r8, #1 << 21 @ check writeback bit
moveq pc, lr @ no writeback -> no fixup
.data_arm_lateldrpostconst:
movs r2, r8, lsl #20 @ Get offset
moveq pc, lr @ zero -> no fixup
and r5, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r2, lsr #20 @ Undo increment
addeq r7, r7, r2, lsr #20 @ Undo decrement
str r7, [sp, r5, lsr #14] @ Put register 'Rn'
mov pc, lr
.data_arm_lateldrprereg:
tst r8, #1 << 21 @ check writeback bit
moveq pc, lr @ no writeback -> no fixup
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [sp, r7, lsl #2] @ Get register 'Rm'
mov r5, r8, lsr #7 @ get shift count
ands r5, r5, #31
and r7, r8, #0x70 @ get shift type
orreq r7, r7, #8 @ shift count = 0
add pc, pc, r7
nop
mov r6, r6, lsl r5 @ 0: LSL #!0
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
b .data_unknown @ 2: MUL?
nop
b .data_unknown @ 3: MUL?
nop
mov r6, r6, lsr r5 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
b .data_unknown @ 6: MUL?
nop
b .data_unknown @ 7: MUL?
nop
mov r6, r6, asr r5 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
b .data_unknown @ A: MUL?
nop
b .data_unknown @ B: MUL?
nop
mov r6, r6, ror r5 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
b .data_unknown @ E: MUL?
nop
b .data_unknown @ F: MUL?
/*
* Function: arm6_7_proc_init (void)
* : arm6_7_proc_fin (void)
*
* Notes : This processor does not require these
*/
ENTRY(cpu_arm6_proc_init)
ENTRY(cpu_arm7_proc_init)
mov pc, lr
@@ -202,7 +186,6 @@ ENTRY(cpu_arm7_proc_fin)
ENTRY(cpu_arm6_do_idle)
ENTRY(cpu_arm7_do_idle)
mov r0, #-EINVAL
mov pc, lr
/*
@@ -303,25 +286,9 @@ ENTRY(arm6_processor_functions)
.word cpu_arm6_proc_fin
.word cpu_arm6_reset
.word cpu_arm6_do_idle
/* cache */
.word cpu_arm6_cache_clean_invalidate_all
.word cpu_arm6_cache_clean_invalidate_range
/* dcache */
.word cpu_arm6_dcache_invalidate_range
.word cpu_arm6_dcache_clean_range
.word cpu_arm6_dcache_clean_page
.word cpu_arm6_dcache_clean_entry
/* icache */
.word cpu_arm6_icache_invalidate_range
.word cpu_arm6_icache_invalidate_page
/* pgtable */
.word cpu_arm6_dcache_clean_area
.word cpu_arm6_switch_mm
.word cpu_arm6_set_pte
.size arm6_processor_functions, . - arm6_processor_functions
/*
@@ -335,25 +302,9 @@ ENTRY(arm7_processor_functions)
.word cpu_arm7_proc_fin
.word cpu_arm7_reset
.word cpu_arm7_do_idle
/* cache */
.word cpu_arm7_cache_clean_invalidate_all
.word cpu_arm7_cache_clean_invalidate_range
/* dcache */
.word cpu_arm7_dcache_invalidate_range
.word cpu_arm7_dcache_clean_range
.word cpu_arm7_dcache_clean_page
.word cpu_arm7_dcache_clean_entry
/* icache */
.word cpu_arm7_icache_invalidate_range
.word cpu_arm7_icache_invalidate_page
/* pgtable */
.word cpu_arm7_dcache_clean_area
.word cpu_arm7_switch_mm
.word cpu_arm7_set_pte
.size arm7_processor_functions, . - arm7_processor_functions
.type cpu_arch_name, #object
@@ -380,6 +331,7 @@ __arm6_proc_info:
.long arm6_processor_functions
.long v3_tlb_fns
.long v3_user_fns
.long v3_cache_fns
.size __arm6_proc_info, . - __arm6_proc_info
.type __arm610_proc_info, #object
@@ -395,6 +347,7 @@ __arm610_proc_info:
.long arm6_processor_functions
.long v3_tlb_fns
.long v3_user_fns
.long v3_cache_fns
.size __arm610_proc_info, . - __arm610_proc_info
.type __arm7_proc_info, #object
@@ -410,6 +363,7 @@ __arm7_proc_info:
.long arm7_processor_functions
.long v3_tlb_fns
.long v3_user_fns
.long v3_cache_fns
.size __arm7_proc_info, . - __arm7_proc_info
.type __arm710_proc_info, #object
@@ -425,4 +379,5 @@ __arm710_proc_info:
.long arm7_processor_functions
.long v3_tlb_fns
.long v3_user_fns
.long v3_cache_fns
.size __arm710_proc_info, . - __arm710_proc_info
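Note on the data-abort hunks above: alongside the renamed cache entries, the arm6/arm7 abort handlers now return the fault status in r1, with bit 8 forced on when the aborted access was a store, instead of the old r1-is-write-flag / r3-is-FSR split. A hedged C rendering of how a consumer of that convention could unpack the two registers follows; decode_abort() and its parameter names are illustrative only, not kernel code.

/* r0 = FAR (fault address), r1 = FSR with bit 8 set for a store */
static inline void decode_abort(unsigned long far, unsigned int fsr,
				unsigned long *addr, int *is_write,
				unsigned int *status)
{
	*addr     = far;             /* faulting address read from CP15 c6 */
	*is_write = (fsr >> 8) & 1;  /* bit 8: the abort came from a write */
	*status   = fsr & 255;       /* low byte: raw fault status from CP15 c5 */
}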
@@ -11,27 +11,28 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/proc-fns.h>
#include <asm/tlbflush.h>
EXPORT_SYMBOL(__flush_dcache_page);
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_cache_clean_invalidate_all);
EXPORT_SYMBOL(cpu_cache_clean_invalidate_range);
EXPORT_SYMBOL(cpu_dcache_clean_page);
EXPORT_SYMBOL(cpu_dcache_clean_entry);
EXPORT_SYMBOL(cpu_dcache_clean_range);
EXPORT_SYMBOL(cpu_dcache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_page);
EXPORT_SYMBOL(cpu_set_pgd);
EXPORT_SYMBOL(cpu_dcache_clean_area);
EXPORT_SYMBOL(cpu_set_pte);
#else
EXPORT_SYMBOL(processor);
#endif
#ifndef MULTI_CACHE
EXPORT_SYMBOL_NOVERS(__cpuc_flush_kern_all);
EXPORT_SYMBOL_NOVERS(__cpuc_flush_user_all);
EXPORT_SYMBOL_NOVERS(__cpuc_flush_user_range);
EXPORT_SYMBOL_NOVERS(__cpuc_coherent_kern_range);
EXPORT_SYMBOL_NOVERS(__cpuc_flush_dcache_page);
#else
EXPORT_SYMBOL(cpu_cache);
#endif
/*
* No module should need to touch the TLB (and currently
* no modules do. We export this for "loadkernel" support
......
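The export changes in the ksyms hunk follow the same split: when only one cache type is compiled in, the __cpuc_* symbols are real functions and are exported directly, while MULTI_CACHE builds resolve those names through a function-pointer table, so only the table itself (cpu_cache) needs exporting. A rough sketch of that indirection, assuming the usual <asm/cacheflush.h> convention; the struct is abridged and its member name is illustrative rather than copied from the header.

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	/* ...one pointer per cache operation... */
};

extern struct cpu_cache_fns cpu_cache;

#ifdef MULTI_CACHE
#define __cpuc_flush_kern_all	cpu_cache.flush_kern_all
#endif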