Commit a6560a26 authored by Russell King

Clean up ARM TLB handling code; previously there was a lot of code
replication across each processor type, each handling alignment of
addresses slightly differently.  We unify this mess, and allow for
greater flexibility in the per-CPU architecture TLB handlers.

We also start to remove the ARM cache.h -> cpu_*.h -> proc-fns.h mess
making the code cleaner and easier to follow.

Documentation describing the expected behaviour of each TLB function
for the 32-bit ARM processors is also included.
parent eac94688
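
As a rough stand-alone illustration of the scheme this commit introduces
(a sketch, not kernel code): each proc_info_list entry now carries a
pointer to a table of TLB functions, and the tlbflush macros either branch
directly to the single configured TLB class or bounce through a cpu_tlb
variable copied at boot.  The reduced C model below keeps one method only;
the names mirror the patch, everything else is illustrative.

#include <stdio.h>

struct cpu_tlb_fns {
	void (*flush_kern_all)(void);	/* mirrors the struct added below */
};

/* One TLB class, as implemented in arch/arm/mm/tlb-*.S (stubbed here): */
static void v4wbi_flush_kern_tlb_all(void)
{
	puts("v4wbi: drain WB, invalidate I+D TLBs");
}

#ifdef MULTI_TLB
/* Several classes compiled in: indirect through cpu_tlb, which
 * setup_processor() copies from the matched proc_info_list entry. */
static struct cpu_tlb_fns cpu_tlb = { v4wbi_flush_kern_tlb_all };
#define __cpu_flush_kern_tlb_all	cpu_tlb.flush_kern_all
#else
/* Exactly one class configured: the macro resolves to a direct call. */
#define __cpu_flush_kern_tlb_all	v4wbi_flush_kern_tlb_all
#endif

#define flush_tlb_all()	__cpu_flush_kern_tlb_all()

int main(void)
{
	flush_tlb_all();
	return 0;
}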
@@ -14,6 +14,7 @@
 #include <asm/assembler.h>
 #include <asm/mach-types.h>
+#include <asm/procinfo.h>
 #include <asm/mach/arch.h>
 #define K(a,b,c)	((a) << 24 | (b) << 12 | (c))
@@ -374,7 +375,7 @@ __lookup_processor_type:
 	and	r6, r6, r9			@ mask wanted bits
 	teq	r5, r6
 	moveq	pc, lr
-	add	r10, r10, #36			@ sizeof(proc_info_list)
+	add	r10, r10, #PROC_INFO_SZ		@ sizeof(proc_info_list)
 	cmp	r10, r7
 	blt	1b
 	mov	r10, #0				@ unknown processor
......
@@ -73,6 +73,9 @@ unsigned int elf_hwcap;
 #ifdef MULTI_CPU
 struct processor processor;
 #endif
+#ifdef MULTI_TLB
+struct cpu_tlb_fns cpu_tlb;
+#endif
 unsigned char aux_device_present;
 char elf_platform[ELF_PLATFORM_SIZE];
@@ -242,6 +245,9 @@ static void __init setup_processor(void)
 #ifdef MULTI_CPU
 	processor = *list->proc;
 #endif
+#ifdef MULTI_TLB
+	cpu_tlb = *list->tlb;
+#endif
 	printk("Processor: %s %s revision %d\n",
 	       proc_info.manufacturer, proc_info.cpu_name,
......
@@ -44,8 +44,6 @@ obj-$(v4) += io-readsw-armv4.o io-writesw-armv4.o io-readsl-armv4.o
 obj-y				+= io-writesl.o
 obj-$(CONFIG_CPU_26)		+= uaccess-armo.o
-obj-$(CONFIG_CPU_32)		+= copy_page-armv3.o copy_page-armv4.o copy_page-armv4mc.o
-obj-$(CONFIG_CPU_32v5)		+= copy_page-armv5te.o
 include $(TOPDIR)/Rules.make
......
@@ -29,16 +29,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 # Select the processor-specific files
 p-$(CONFIG_CPU_26)	+= proc-arm2,3.o
-p-$(CONFIG_CPU_ARM610)	+= proc-arm6,7.o
-p-$(CONFIG_CPU_ARM710)	+= proc-arm6,7.o
-p-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o armv4t-late-abort.o
-p-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o armv4t-early-abort.o
-p-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o armv4t-early-abort.o
-p-$(CONFIG_CPU_ARM926T)	+= proc-arm926.o armv5ej-early-abort.o
-p-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o armv4t-early-abort.o
-p-$(CONFIG_CPU_SA110)	+= proc-sa110.o armv4-early-abort.o
-p-$(CONFIG_CPU_SA1100)	+= proc-sa110.o armv4-early-abort.o minicache.o
-p-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o armv4t-early-abort.o minicache.o
+p-$(CONFIG_CPU_ARM610)	+= proc-arm6,7.o tlb-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM710)	+= proc-arm6,7.o tlb-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o tlb-v4.o copypage-v4.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o tlb-v4wb.o copypage-v4.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o tlb-v4wb.o copypage-v4.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM926T)	+= proc-arm926.o tlb-v4wb.o copypage-v4.o abort-ev5ej.o
+p-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o tlb-v4wb.o copypage-v4.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110)	+= proc-sa110.o tlb-v4wb.o copypage-v4.o copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_SA1100)	+= proc-sa110.o tlb-v4wb.o copypage-v4.o copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o tlb-v4wb.o copypage-v5te.o abort-ev4t.o minicache.o
 obj-y		+= $(sort $(p-y))
......
@@ -16,9 +16,14 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <asm/page.h>
+#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
-#define minicache_address (0xffff2000)
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define minicache_address (0xffff8000)
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				   L_PTE_CACHEABLE)
@@ -39,7 +44,7 @@ static pte_t *minicache_pte;
 unsigned long map_page_minicache(unsigned long virt)
 {
 	set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
-	cpu_tlb_invalidate_page(minicache_address, 0);
+	flush_kern_tlb_page(minicache_address);
 	return minicache_address;
 }
......
@@ -420,71 +420,6 @@ ENTRY(cpu_arm1020_icache_invalidate_page)
 #endif
 	mov	pc, lr
-/* ================================== TLB ================================= */
-/*
- * cpu_arm1020_tlb_invalidate_all()
- *
- * Invalidate all TLB entries
- */
-	.align	5
-ENTRY(cpu_arm1020_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I & D tlbs
-#ifdef CONFIG_CPU_ARM1020_BRANCH_PREDICTION
-	mov	r0, r0
-	mov	r0, r0
-#endif
-	mov	pc, lr
-/*
- * cpu_arm1020_tlb_invalidate_range(start, end)
- *
- * invalidate TLB entries covering the specified range
- *
- * start: range start address
- * end:   range end address
- */
-	.align	5
-ENTRY(cpu_arm1020_tlb_invalidate_range)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	add	r0, r0, #PAGESIZE
-#ifdef CONFIG_CPU_ARM1020_BRANCH_PREDICTION
-	mov	r0, r0
-#endif
-	cmp	r0, r1
-	blt	1b
-	mov	pc, lr
-/*
- * cpu_arm1020_tlb_invalidate_page(page, flags)
- *
- * invalidate the TLB entries for the specified page.
- *
- * page:  page to invalidate
- * flags: non-zero if we include the I TLB
- */
-	.align	5
-ENTRY(cpu_arm1020_tlb_invalidate_page)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-#ifdef CONFIG_CPU_ARM1020_BRANCH_PREDICTION
-	mov	r0, r0
-	mov	r0, r0
-#endif
-	teq	r1, #0
-	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-#ifdef CONFIG_CPU_ARM1020_BRANCH_PREDICTION
-	mov	r0, r0
-	mov	r0, r0
-#endif
-	mov	pc, lr
 /* =============================== PageTable ============================== */
 /*
@@ -695,11 +630,6 @@ arm1020_processor_functions:
 	.word	cpu_arm1020_icache_invalidate_range
 	.word	cpu_arm1020_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm1020_tlb_invalidate_all
-	.word	cpu_arm1020_tlb_invalidate_range
-	.word	cpu_arm1020_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm1020_set_pgd
 	.word	cpu_arm1020_set_pmd
@@ -741,4 +671,5 @@ __arm1020_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
 	.long	cpu_arm1020_info
 	.long	arm1020_processor_functions
+	.long	v4wbi_tlb_fns
 	.size	__arm1020_proc_info, . - __arm1020_proc_info
@@ -341,6 +341,7 @@ arm3_elf_name: .asciz "v2"
 	.long	0
 	.long	cpu_arm2_info
 	.long	SYMBOL_NAME(arm2_processor_functions)
+	.long	0
 	.long	0x41560250
 	.long	0xfffffff0
@@ -351,6 +352,7 @@ arm3_elf_name: .asciz "v2"
 	.long	0
 	.long	cpu_arm250_info
 	.long	SYMBOL_NAME(arm250_processor_functions)
+	.long	0
 	.long	0x41560300
 	.long	0xfffffff0
@@ -361,4 +363,4 @@ arm3_elf_name: .asciz "v2"
 	.long	0
 	.long	cpu_arm3_info
 	.long	SYMBOL_NAME(arm3_processor_functions)
+	.long	0
@@ -47,47 +47,6 @@ ENTRY(cpu_arm6_flush_ram_page)
 ENTRY(cpu_arm7_flush_ram_page)
 	mov	pc, lr
-/*
- * Function: arm6_7_tlb_invalidate_all (void)
- *
- * Purpose : flush all TLB entries in all caches
- */
-ENTRY(cpu_arm6_tlb_invalidate_all)
-ENTRY(cpu_arm7_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c5, c0, 0		@ flush TLB
-	mov	pc, lr
-/*
- * Function: arm6_7_tlb_invalidate_range (unsigned long address, int end, int flags)
- *
- * Params  : address	Area start address
- *	   : end	Area end address
- *	   : flags	b0 = I cache as well
- *
- * Purpose : flush a TLB entry
- */
-ENTRY(cpu_arm6_tlb_invalidate_range)
-ENTRY(cpu_arm7_tlb_invalidate_range)
-1:	mcr	p15, 0, r0, c6, c0, 0		@ flush TLB
-	add	r0, r0, #4096
-	cmp	r0, r1
-	blt	1b
-	mov	pc, lr
-/*
- * Function: arm6_7_tlb_invalidate_page (unsigned long address, int flags)
- *
- * Params  : address	Address
- *	   : flags	b0 = I-TLB as well
- *
- * Purpose : flush a TLB entry
- */
-ENTRY(cpu_arm6_tlb_invalidate_page)
-ENTRY(cpu_arm7_tlb_invalidate_page)
-	mcr	p15, 0, r0, c6, c0, 0		@ flush TLB
-	mov	pc, lr
 /*
  * Function: arm6_7_data_abort ()
  *
@@ -409,11 +368,6 @@ ENTRY(arm6_processor_functions)
 	.word	cpu_arm6_icache_invalidate_range
 	.word	cpu_arm6_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm6_tlb_invalidate_all
-	.word	cpu_arm6_tlb_invalidate_range
-	.word	cpu_arm6_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm6_set_pgd
 	.word	cpu_arm6_set_pmd
@@ -453,11 +407,6 @@ ENTRY(arm7_processor_functions)
 	.word	cpu_arm7_icache_invalidate_range
 	.word	cpu_arm7_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm7_tlb_invalidate_all
-	.word	cpu_arm7_tlb_invalidate_range
-	.word	cpu_arm7_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm7_set_pgd
 	.word	cpu_arm7_set_pmd
@@ -515,6 +464,7 @@ __arm6_proc_info:
 	.long	HWCAP_SWP | HWCAP_26BIT
 	.long	cpu_arm6_info
 	.long	arm6_processor_functions
+	.long	v3_tlb_fns
 	.size	__arm6_proc_info, . - __arm6_proc_info
 	.type	__arm610_proc_info, #object
@@ -528,6 +478,7 @@ __arm610_proc_info:
 	.long	HWCAP_SWP | HWCAP_26BIT
 	.long	cpu_arm610_info
 	.long	arm6_processor_functions
+	.long	v3_tlb_fns
 	.size	__arm610_proc_info, . - __arm610_proc_info
 	.type	__arm7_proc_info, #object
@@ -541,6 +492,7 @@ __arm7_proc_info:
 	.long	HWCAP_SWP | HWCAP_26BIT
 	.long	cpu_arm7_info
 	.long	arm7_processor_functions
+	.long	v3_tlb_fns
 	.size	__arm7_proc_info, . - __arm7_proc_info
 	.type	__arm710_proc_info, #object
@@ -554,4 +506,5 @@ __arm710_proc_info:
 	.long	HWCAP_SWP | HWCAP_26BIT
 	.long	cpu_arm710_info
 	.long	arm7_processor_functions
+	.long	v3_tlb_fns
 	.size	__arm710_proc_info, . - __arm710_proc_info
@@ -66,44 +66,6 @@ ENTRY(cpu_arm720_dcache_clean_entry)
 ENTRY(cpu_arm720_flush_ram_page)
 	mov	pc, lr
-/*
- * Function: arm720_tlb_invalidate_all (void)
- *
- * Purpose : flush all TLB entries in all caches
- */
-ENTRY(cpu_arm720_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
-	mov	pc, lr
-/*
- * Function: arm720_tlb_invalidate_range (unsigned long address, int end, int flags)
- *
- * Params  : address	Area start address
- *	   : end	Area end address
- *	   : flags	b0 = I cache as well
- *
- * Purpose : flush a TLB entry
- */
-ENTRY(cpu_arm720_tlb_invalidate_range)
-1:	mcr	p15, 0, r0, c8, c7, 1		@ flush TLB (v4)
-	add	r0, r0, #4096
-	cmp	r0, r1
-	blt	1b
-	mov	pc, lr
-/*
- * Function: arm720_tlb_invalidate_page (unsigned long address, int flags)
- *
- * Params  : address	Address
- *	   : flags	b0 = I-TLB as well
- *
- * Purpose : flush a TLB entry
- */
-ENTRY(cpu_arm720_tlb_invalidate_page)
-	mcr	p15, 0, r0, c8, c7, 1		@ flush TLB (v4)
-	mov	pc, lr
 /*
  * Function: arm720_check_bugs (void)
  *	   : arm720_proc_init (void)
@@ -259,11 +221,6 @@ ENTRY(arm720_processor_functions)
 	.word	cpu_arm720_icache_invalidate_range
 	.word	cpu_arm720_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm720_tlb_invalidate_all
-	.word	cpu_arm720_tlb_invalidate_range
-	.word	cpu_arm720_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm720_set_pgd
 	.word	cpu_arm720_set_pmd
@@ -307,4 +264,5 @@ __arm720_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT	@ elf_hwcap
 	.long	cpu_arm720_info			@ info
 	.long	arm720_processor_functions
+	.long	v4_tlb_fns
 	.size	__arm720_proc_info, . - __arm720_proc_info
@@ -358,62 +358,6 @@ ENTRY(cpu_arm920_icache_invalidate_page)
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mov	pc, lr
-/* ================================== TLB ================================= */
-/*
- * cpu_arm920_tlb_invalidate_all()
- *
- * Invalidate all TLB entries
- */
-	.align	5
-ENTRY(cpu_arm920_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I & D TLBs
-	mov	pc, lr
-/*
- * cpu_arm920_tlb_invalidate_range(start, end)
- *
- * invalidate TLB entries covering the specified range
- *
- * start: range start address
- * end:   range end address
- */
-	.align	5
-ENTRY(cpu_arm920_tlb_invalidate_range)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	mov	r3, #PAGESIZE
-	sub	r3, r3, #1
-	bic	r0, r0, r3
-	bic	r1, r1, r3
-1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	add	r0, r0, #PAGESIZE
-	cmp	r0, r1
-	blt	1b
-	mov	pc, lr
-/*
- * cpu_arm920_tlb_invalidate_page(page, flags)
- *
- * invalidate the TLB entries for the specified page.
- *
- * page:  page to invalidate
- * flags: non-zero if we include the I TLB
- */
-	.align	5
-ENTRY(cpu_arm920_tlb_invalidate_page)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	teq	r1, #0
-	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	mov	pc, lr
 /* =============================== PageTable ============================== */
 /*
@@ -589,11 +533,6 @@ arm920_processor_functions:
 	.word	cpu_arm920_icache_invalidate_range
 	.word	cpu_arm920_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm920_tlb_invalidate_all
-	.word	cpu_arm920_tlb_invalidate_range
-	.word	cpu_arm920_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm920_set_pgd
 	.word	cpu_arm920_set_pmd
@@ -635,4 +574,5 @@ __arm920_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
 	.long	cpu_arm920_info
 	.long	arm920_processor_functions
+	.long	v4wbi_tlb_fns
 	.size	__arm920_proc_info, . - __arm920_proc_info
@@ -359,62 +359,6 @@ ENTRY(cpu_arm922_icache_invalidate_page)
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mov	pc, lr
-/* ================================== TLB ================================= */
-/*
- * cpu_arm922_tlb_invalidate_all()
- *
- * Invalidate all TLB entries
- */
-	.align	5
-ENTRY(cpu_arm922_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I & D TLBs
-	mov	pc, lr
-/*
- * cpu_arm922_tlb_invalidate_range(start, end)
- *
- * invalidate TLB entries covering the specified range
- *
- * start: range start address
- * end:   range end address
- */
-	.align	5
-ENTRY(cpu_arm922_tlb_invalidate_range)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	mov	r3, #PAGESIZE
-	sub	r3, r3, #1
-	bic	r0, r0, r3
-	bic	r1, r1, r3
-1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	add	r0, r0, #PAGESIZE
-	cmp	r0, r1
-	blt	1b
-	mov	pc, lr
-/*
- * cpu_arm922_tlb_invalidate_page(page, flags)
- *
- * invalidate the TLB entries for the specified page.
- *
- * page:  page to invalidate
- * flags: non-zero if we include the I TLB
- */
-	.align	5
-ENTRY(cpu_arm922_tlb_invalidate_page)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	teq	r1, #0
-	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	mov	pc, lr
 /* =============================== PageTable ============================== */
 /*
@@ -590,11 +534,6 @@ arm922_processor_functions:
 	.word	cpu_arm922_icache_invalidate_range
 	.word	cpu_arm922_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm922_tlb_invalidate_all
-	.word	cpu_arm922_tlb_invalidate_range
-	.word	cpu_arm922_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm922_set_pgd
 	.word	cpu_arm922_set_pmd
@@ -636,4 +575,5 @@ __arm922_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
 	.long	cpu_arm922_info
 	.long	arm922_processor_functions
+	.long	v4wbi_tlb_fns
 	.size	__arm922_proc_info, . - __arm922_proc_info
@@ -387,62 +387,6 @@ ENTRY(cpu_arm926_icache_invalidate_page)
 	mov	pc, lr
-/* ================================== TLB ================================= */
-/*
- * cpu_arm926_tlb_invalidate_all()
- *
- * Invalidate all TLB entries
- */
-	.align	5
-ENTRY(cpu_arm926_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I & D TLBs
-	mov	pc, lr
-/*
- * cpu_arm926_tlb_invalidate_range(start, end)
- *
- * invalidate TLB entries covering the specified range
- *
- * start: range start address
- * end:   range end address
- */
-	.align	5
-ENTRY(cpu_arm926_tlb_invalidate_range)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	mov	r3, #PAGESIZE
-	sub	r3, r3, #1
-	bic	r0, r0, r3
-	bic	r1, r1, r3
-1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	add	r0, r0, #PAGESIZE
-	cmp	r0, r1
-	blt	1b
-	mov	pc, lr
-/*
- * cpu_arm926_tlb_invalidate_page(page, flags)
- *
- * invalidate the TLB entries for the specified page.
- *
- * page:  page to invalidate
- * flags: non-zero if we include the I TLB
- */
-	.align	5
-ENTRY(cpu_arm926_tlb_invalidate_page)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	teq	r1, #0
-	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	mov	pc, lr
 /* =============================== PageTable ============================== */
 /*
@@ -627,11 +571,6 @@ arm926_processor_functions:
 	.word	cpu_arm926_icache_invalidate_range
 	.word	cpu_arm926_icache_invalidate_page
-	/* tlb */
-	.word	cpu_arm926_tlb_invalidate_all
-	.word	cpu_arm926_tlb_invalidate_range
-	.word	cpu_arm926_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_arm926_set_pgd
 	.word	cpu_arm926_set_pmd
@@ -673,4 +612,5 @@ __arm926_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
 	.long	cpu_arm926_info
 	.long	arm926_processor_functions
+	.long	v4wbi_tlb_fns
 	.size	__arm926_proc_info, . - __arm926_proc_info
#include <asm/thread_info.h>
/*
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
*/
.macro vma_vm_mm, rd, rn
ldr \rd, [\rn, #VMA_VM_MM]
.endm
/*
* vma_vm_flags - get vma->vm_flags
*/
.macro vma_vm_flags, rd, rn
ldr \rd, [\rn, #VMA_VM_FLAGS]
.endm
/*
* act_mm - get current->active_mm
*/
.macro act_mm, rd
bic \rd, sp, #8128
bic \rd, \rd, #63
ldr \rd, [\rd, #TI_TASK]
ldr \rd, [\rd, #TSK_ACTIVE_MM]
.endm
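
The act_mm macro above deserves a note: the odd 8128/63 pair exists because
ARM immediates must fit an 8-bit value with rotation, so ~8191 has to be
built in two bic steps.  A hedged C restatement (THREAD_SIZE of 8K and the
field names are assumptions matching this era's layout):

#include <stdint.h>

#define THREAD_SIZE 8192	/* 8K kernel stacks; thread_info sits at the base */

struct task_struct;
struct thread_info { struct task_struct *task; };

static inline struct thread_info *thread_info_from_sp(uintptr_t sp)
{
	/* 8128 + 63 == 8191, so the two bics clear the low 13 bits of sp */
	return (struct thread_info *)(sp & ~(uintptr_t)(THREAD_SIZE - 1));
}

From there the macro loads thread_info->task and then task->active_mm.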
@@ -406,61 +406,6 @@ ENTRY(cpu_sa1100_icache_invalidate_page)
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mov	pc, lr
-/* ================================== TLB ================================= */
-/*
- * cpu_sa110_tlb_invalidate_all()
- *
- * Invalidate all TLB entries
- */
-	.align	5
-ENTRY(cpu_sa110_tlb_invalidate_all)
-ENTRY(cpu_sa1100_tlb_invalidate_all)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I & D TLBs
-	mov	pc, lr
-/*
- * cpu_sa110_tlb_invalidate_range(start, end)
- *
- * invalidate TLB entries covering the specified range
- *
- * start: range start address
- * end:   range end address
- */
-	.align	5
-ENTRY(cpu_sa110_tlb_invalidate_range)
-ENTRY(cpu_sa1100_tlb_invalidate_range)
-	bic	r0, r0, #0x0ff
-	bic	r0, r0, #0xf00
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	add	r0, r0, #PAGESIZE
-	cmp	r0, r1
-	blo	1b
-	mcr	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
-	mov	pc, lr
-/*
- * cpu_sa110_tlb_invalidate_page(page, flags)
- *
- * invalidate the TLB entries for the specified page.
- *
- * page:  page to invalidate
- * flags: non-zero if we include the I TLB
- */
-	.align	5
-ENTRY(cpu_sa110_tlb_invalidate_page)
-ENTRY(cpu_sa1100_tlb_invalidate_page)
-	mov	r3, #0
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	teq	r1, #0
-	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcrne	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
-	mov	pc, lr
 /* =============================== PageTable ============================== */
 /*
@@ -615,11 +560,6 @@ ENTRY(sa110_processor_functions)
 	.word	cpu_sa110_icache_invalidate_range
 	.word	cpu_sa110_icache_invalidate_page
-	/* tlb */
-	.word	cpu_sa110_tlb_invalidate_all
-	.word	cpu_sa110_tlb_invalidate_range
-	.word	cpu_sa110_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_sa110_set_pgd
 	.word	cpu_sa110_set_pmd
@@ -665,19 +605,14 @@ ENTRY(sa1100_processor_functions)
 	.word	cpu_sa1100_icache_invalidate_range
 	.word	cpu_sa1100_icache_invalidate_page
-	/* tlb */
-	.word	cpu_sa1100_tlb_invalidate_all
-	.word	cpu_sa1100_tlb_invalidate_range
-	.word	cpu_sa1100_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_sa1100_set_pgd
 	.word	cpu_sa1100_set_pmd
 	.word	cpu_sa1100_set_pte
 	/* misc */
-	.word	armv4_clear_user_page
-	.word	armv4_copy_user_page
+	.word	armv4_mc_clear_user_page
+	.word	armv4_mc_copy_user_page
 	.size	sa1100_processor_functions, . - sa1100_processor_functions
@@ -715,6 +650,7 @@ __sa110_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
 	.long	cpu_sa110_info
 	.long	sa110_processor_functions
+	.long	v4wb_tlb_fns
 	.size	__sa110_proc_info, . - __sa110_proc_info
 	.type	__sa1100_proc_info,#object
@@ -728,6 +664,7 @@ __sa1100_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
 	.long	cpu_sa1100_info
 	.long	sa1100_processor_functions
+	.long	v4wb_tlb_fns
 	.size	__sa1100_proc_info, . - __sa1100_proc_info
 	.type	__sa1110_proc_info,#object
@@ -741,4 +678,5 @@ __sa1110_proc_info:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
 	.long	cpu_sa1110_info
 	.long	sa1100_processor_functions
+	.long	v4wb_tlb_fns
 	.size	__sa1110_proc_info, . - __sa1110_proc_info
@@ -8,6 +8,9 @@
  * published by the Free Software Foundation.
  */
 #include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
 #include <asm/proc-fns.h>
 #ifndef MULTI_CPU
@@ -20,12 +23,18 @@ EXPORT_SYMBOL(cpu_dcache_clean_range);
 EXPORT_SYMBOL(cpu_dcache_invalidate_range);
 EXPORT_SYMBOL(cpu_icache_invalidate_range);
 EXPORT_SYMBOL(cpu_icache_invalidate_page);
-EXPORT_SYMBOL(cpu_tlb_invalidate_all);
-EXPORT_SYMBOL(cpu_tlb_invalidate_range);
-EXPORT_SYMBOL(cpu_tlb_invalidate_page);
 EXPORT_SYMBOL(cpu_set_pgd);
 EXPORT_SYMBOL(cpu_set_pmd);
 EXPORT_SYMBOL(cpu_set_pte);
 #else
 EXPORT_SYMBOL(processor);
 #endif
+#ifndef MULTI_TLB
+EXPORT_SYMBOL_NOVERS(__cpu_flush_kern_tlb_all);
+EXPORT_SYMBOL_NOVERS(__cpu_flush_user_tlb_mm);
+EXPORT_SYMBOL_NOVERS(__cpu_flush_user_tlb_range);
+EXPORT_SYMBOL_NOVERS(__cpu_flush_user_tlb_page);
+#else
+EXPORT_SYMBOL(cpu_tlb);
+#endif
@@ -494,55 +494,6 @@ ENTRY(xscale_dcache_unlock)
 ENTRY(xscale_cache_dummy)
 	mov	pc, lr
-/* ================================== TLB ================================= */
-/*
- * cpu_xscale_tlb_invalidate_all()
- *
- * Invalidate all TLB entries
- */
-	.align	5
-ENTRY(cpu_xscale_tlb_invalidate_all)
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
-	cpwait_ret lr, ip
-/*
- * cpu_xscale_tlb_invalidate_range(start, end)
- *
- * invalidate TLB entries covering the specified range
- *
- * start: range start address
- * end:   range end address
- */
-	.align	5
-ENTRY(cpu_xscale_tlb_invalidate_range)
-	bic	r0, r0, #(PAGESIZE - 1) & 0x00ff
-	bic	r0, r0, #(PAGESIZE - 1) & 0xff00
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
-1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
-	add	r0, r0, #PAGESIZE
-	cmp	r0, r1
-	blo	1b
-	cpwait_ret lr, ip
-/*
- * cpu_xscale_tlb_invalidate_page(page, flags)
- *
- * invalidate the TLB entries for the specified page.
- *
- * page:  page to invalidate
- * flags: non-zero if we include the I TLB
- */
-	.align	5
-ENTRY(cpu_xscale_tlb_invalidate_page)
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
-	teq	r1, #0
-	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
-	mcrne	p15, 0, r3, c8, c5, 1		@ invalidate I TLB entry
-	cpwait_ret lr, ip
 /* ================================ TLB LOCKING==============================
  *
  * The XScale MicroArchitecture implements support for locking entries into
@@ -637,6 +588,7 @@ ENTRY(cpu_xscale_set_pmd)
 	orreq	r1, r1, #PMD_SECT_TEX(1)
 #endif
 	str	r1, [r0]
+	mov	ip, #0
 	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
 	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
 	mov	pc, lr
@@ -645,6 +597,8 @@ ENTRY(cpu_xscale_set_pmd)
 * cpu_xscale_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
+ *
+ * Errata 40: must set memory to write-through for user read-only pages.
 */
 	.align	5
 ENTRY(cpu_xscale_set_pte)
@@ -653,29 +607,49 @@ ENTRY(cpu_xscale_set_pte)
 	bic	r2, r1, #0xff0
 	orr	r2, r2, #PTE_TYPE_EXT		@ extended page
-	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-	tst	r1, #L_PTE_USER | L_PTE_EXEC	@ User or Exec?
+	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+	tst	r3, #L_PTE_USER | L_PTE_EXEC	@ User or Exec?
 	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w
-	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
+	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
 	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
 						@ combined with user -> user r/w
+	@
+	@ Handle the X bit.  We want to set this bit for the minicache
+	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
+	@ and we have a writeable, cacheable region.  If we ignore the
+	@ U and E bits, we can allow user space to use the minicache as
+	@ well.
+	@
+	@  X = (C & ~W & ~B) | (C & W & B & write_allocate)
+	@
+	eor	ip, r1, #L_PTE_CACHEABLE
+	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
 #if PTE_CACHE_WRITE_ALLOCATE
-	tst	r1, #L_PTE_CACHEABLE		@ cacheable?
-	orrne	r2, r2, #PTE_EXT_TEX(1)
-#else
-	eor	r1, r1, #L_PTE_CACHEABLE
-	tst	r1, #L_PTE_CACHEABLE | L_PTE_BUFFERABLE	@ C = 1 B = 0?
-	orreq	r2, r2, #PTE_EXT_TEX(1)		@ yes -> set X (minicache)
+	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
+	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
 #endif
-	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
+	orreq	r2, r2, #PTE_EXT_TEX(1)
+	@
+	@ Erratum 40: The B bit must be cleared for a user read-only
+	@ cacheable page.
+	@
+	@  B = B & ~((U|E) & C & ~W)
+	@
+	and	ip, r1, #L_PTE_USER | L_PTE_EXEC | L_PTE_WRITE | L_PTE_CACHEABLE
+	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
+	teqne	ip, #L_PTE_EXEC | L_PTE_CACHEABLE
+	teqne	ip, #L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE
+	biceq	r2, r2, #PTE_BUFFERABLE
+	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
 	movne	r2, #0				@ no -> fault
 	str	r2, [r0]			@ hardware version
-	mov	r0, r0
+	mov	ip, #0
 	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
 	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
 	mov	pc, lr
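
To make the eor/tst gymnastics above easier to audit, here is a hedged C
restatement of the two bit formulas quoted in the comments (illustrative
only; the real code computes these with condition-flag tricks, not
booleans):

#include <stdbool.h>

/* X = (C & ~W & ~B) | (C & W & B & write_allocate) */
static bool pte_x_bit(bool C, bool W, bool B, bool write_allocate)
{
	return (C && !W && !B) || (C && W && B && write_allocate);
}

/* Erratum 40: B = B & ~((U|E) & C & ~W) */
static bool pte_b_bit(bool B, bool U, bool E, bool C, bool W)
{
	return B && !((U || E) && C && !W);
}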
@@ -743,11 +717,6 @@ ENTRY(xscale_processor_functions)
 	.word	cpu_xscale_icache_invalidate_range
 	.word	cpu_xscale_icache_invalidate_page
-	/* tlb */
-	.word	cpu_xscale_tlb_invalidate_all
-	.word	cpu_xscale_tlb_invalidate_range
-	.word	cpu_xscale_tlb_invalidate_page
 	/* pgtable */
 	.word	cpu_xscale_set_pgd
 	.word	cpu_xscale_set_pmd
@@ -795,6 +764,7 @@ __80200_proc_info:
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
 	.long	cpu_80200_info
 	.long	xscale_processor_functions
+	.long	v4wbi_tlb_fns
 	.size	__80200_proc_info, . - __80200_proc_info
 	.type	__cotulla_proc_info,#object
@@ -808,5 +778,6 @@ __cotulla_proc_info:
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
 	.long	cpu_cotulla_info
 	.long	xscale_processor_functions
+	.long	v4wbi_tlb_fns
 	.size	__cotulla_proc_info, . - __cotulla_proc_info
/*
* linux/arch/arm/mm/tlbv3.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 3 TLB handling functions.
*
* Processors: ARM610, ARM710.
*/
#include <linux/linkage.h>
#include <asm/constants.h>
#include "proc-macros.S"
.align 5
/*
* v3_flush_user_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address space
*
* - mm - mm_struct describing address space
*/
ENTRY(v3_flush_user_tlb_mm)
act_mm r1 @ get current->active_mm
teq r0, r1 @ == mm ?
movne pc, lr @ no, we dont do anything
/*
* v3_flush_kern_tlb_all()
*
* Invalidate the entire TLB
*/
ENTRY(v3_flush_kern_tlb_all)
mov r0, #0
mcr p15, 0, r0, c5, c0, 0 @ invalidate TLB
mov pc, lr
/*
* v3_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - range start address
* - end - range end address
* - mm - mm_struct describing address space
*/
.align 5
ENTRY(v3_flush_user_tlb_range)
vma_vm_mm r2, r2
act_mm r3 @ get current->active_mm
teq r2, r3 @ == mm ?
movne pc, lr @ no, we dont do anything
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c6, c0, 0 @ invalidate TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
* v3_flush_user_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address space.
*
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*/
.align 5
ENTRY(v3_flush_user_tlb_page)
vma_vm_mm r2, r1 @ get vma->vm_mm
act_mm r3 @ get current->active_mm
teq r2, r3 @ equal
movne pc, lr @ no
ENTRY(v3_flush_kern_tlb_page)
mcr p15, 0, r0, c6, c0, 0 @ invalidate TLB entry
mov pc, lr
ENTRY(v3_tlb_fns)
.word v3_flush_kern_tlb_all
.word v3_flush_user_tlb_mm
.word v3_flush_user_tlb_range
.word v3_flush_user_tlb_page
.word v3_flush_kern_tlb_page
/*
* linux/arch/arm/mm/tlbv4.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 4 TLB handling functions.
 * These assume split I/D TLBs, and no write buffer.
*
* Processors: ARM720T
*/
#include <linux/linkage.h>
#include <asm/constants.h>
#include "proc-macros.S"
.align 5
/*
* v4_flush_user_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address space
*
* - mm - mm_struct describing address space
*/
ENTRY(v4_flush_user_tlb_mm)
act_mm r1 @ get current->active_mm
teq r0, r1 @ == mm ?
movne pc, lr @ no, we dont do anything
/*
* v4_flush_kern_tlb_all()
*
* Invalidate the entire TLB
*/
ENTRY(v4_flush_kern_tlb_all)
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mov pc, lr
/*
* v4_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - range start address
* - end - range end address
* - mm - mm_struct describing address space
*/
.align 5
ENTRY(v4_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
movne pc, lr @ no, we dont do anything
vma_vm_flags ip, r2
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
tst ip, #VM_EXEC
mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mov pc, lr
/*
* v4_flush_user_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address space.
*
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*/
.align 5
ENTRY(v4_flush_user_tlb_page)
vma_vm_mm r2, r1 @ get vma->vm_mm
act_mm r3 @ get current->active_mm
teq r2, r3 @ equal
movne pc, lr @ no
vma_vm_flags r2, r1
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
tst r2, #VM_EXEC
mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mov pc, lr
/*
* v4_flush_kern_tlb_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
* will be in the kernels virtual memory space. Current uses
* only require the D-TLB to be invalidated.
*
* - kaddr - Kernel virtual memory address
*/
ENTRY(v4_flush_kern_tlb_page)
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
ENTRY(v4_tlb_fns)
.word v4_flush_kern_tlb_all
.word v4_flush_user_tlb_mm
.word v4_flush_user_tlb_range
.word v4_flush_user_tlb_page
.word v4_flush_kern_tlb_page
/*
* linux/arch/arm/mm/tlbv4wb.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 4 and version 5 TLB handling functions.
 * These assume split I/D TLBs, with a write buffer.
*
* Processors: ARM920 ARM922 ARM926 SA110 SA1100 SA1110 XScale
*/
#include <linux/linkage.h>
#include <asm/constants.h>
#include "proc-macros.S"
.align 5
/*
* v4wb_flush_user_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address space
*
* - mm - mm_struct describing address space
*/
ENTRY(v4wb_flush_user_tlb_mm)
ENTRY(v4wbi_flush_user_tlb_mm)
act_mm r1 @ get current->active_mm
teq r0, r1 @ == mm ?
movne pc, lr @ no, we dont do anything
/*
 * v4wb_flush_kern_tlb_all()
*
* Invalidate the entire TLB
*/
ENTRY(v4wb_flush_kern_tlb_all)
ENTRY(v4wbi_flush_kern_tlb_all)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mov pc, lr
/*
* v4wb_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - range start address
* - end - range end address
* - mm - mm_struct describing address space
*/
.align 5
ENTRY(v4wb_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
movne pc, lr @ no, we dont do anything
vma_vm_flags r2, r2
mcr p15, 0, r3, c7, c10, 4 @ drain WB
tst r2, #VM_EXEC
mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
* v4wb_flush_user_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address space.
*
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*/
.align 5
ENTRY(v4wb_flush_user_tlb_page)
vma_vm_mm r2, r1 @ get vma->vm_mm
act_mm r3 @ get current->active_mm
teq r2, r3 @ equal
movne pc, lr @ no
vma_vm_flags r2, r1
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
tst r2, #VM_EXEC
mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
ENTRY(v4wb_flush_kern_tlb_page)
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
/*
* These two are optimised for ARM920, ARM922, ARM926, Xscale
*/
/*
 * v4wbi_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - range start address
* - end - range end address
* - mm - mm_struct describing address space
*/
.align 5
ENTRY(v4wbi_flush_user_tlb_range)
act_mm r3 @ get current->active_mm
teq r2, r3 @ == mm ?
movne pc, lr @ no, we dont do anything
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
 * v4wbi_flush_user_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address space.
*
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*/
.align 5
ENTRY(v4wbi_flush_user_tlb_page)
vma_vm_mm r2, r1 @ get vma->vm_mm
act_mm r3 @ get current->active_mm
teq r2, r3 @ equal
movne pc, lr @ no
vma_vm_flags r2, r1
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
tst r2, #VM_EXEC
mcrne p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
ENTRY(v4wbi_flush_kern_tlb_page)
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
ENTRY(v4wb_tlb_fns)
.word v4wb_flush_kern_tlb_all
.word v4wb_flush_user_tlb_mm
.word v4wb_flush_user_tlb_range
.word v4wb_flush_user_tlb_page
.word v4wb_flush_kern_tlb_page
ENTRY(v4wbi_tlb_fns)
.word v4wbi_flush_kern_tlb_all
.word v4wbi_flush_user_tlb_mm
.word v4wbi_flush_user_tlb_range
.word v4wbi_flush_user_tlb_page
.word v4wbi_flush_kern_tlb_page
@@ -93,21 +93,6 @@ extern struct processor {
 		void (*invalidate_page)(void *virt_page);
 	} icache;
-	struct {	/* TLB */
-		/*
-		 * flush all TLBs
-		 */
-		void (*invalidate_all)(void);
-		/*
-		 * flush a specific TLB
-		 */
-		void (*invalidate_range)(unsigned long address, unsigned long end);
-		/*
-		 * flush a specific TLB
-		 */
-		void (*invalidate_page)(unsigned long address, int flags);
-	} tlb;
 	struct {	/* PageTable */
 		/*
 		 * Set the page table
@@ -152,10 +137,6 @@ extern const struct processor sa110_processor_functions;
 #define cpu_icache_invalidate_range(s,e)	processor.icache.invalidate_range(s,e)
 #define cpu_icache_invalidate_page(vp)		processor.icache.invalidate_page(vp)
-#define cpu_tlb_invalidate_all()		processor.tlb.invalidate_all()
-#define cpu_tlb_invalidate_range(s,e)		processor.tlb.invalidate_range(s,e)
-#define cpu_tlb_invalidate_page(vp,f)		processor.tlb.invalidate_page(vp,f)
 #define cpu_set_pgd(pgd)			processor.pgtable.set_pgd(pgd)
 #define cpu_set_pmd(pmdp, pmd)			processor.pgtable.set_pmd(pmdp, pmd)
 #define cpu_set_pte(ptep, pte)			processor.pgtable.set_pte(ptep, pte)
......
@@ -37,9 +37,6 @@
 #define cpu_dcache_clean_entry		__cpu_fn(CPU_NAME,_dcache_clean_entry)
 #define cpu_icache_invalidate_range	__cpu_fn(CPU_NAME,_icache_invalidate_range)
 #define cpu_icache_invalidate_page	__cpu_fn(CPU_NAME,_icache_invalidate_page)
-#define cpu_tlb_invalidate_all		__cpu_fn(CPU_NAME,_tlb_invalidate_all)
-#define cpu_tlb_invalidate_range	__cpu_fn(CPU_NAME,_tlb_invalidate_range)
-#define cpu_tlb_invalidate_page		__cpu_fn(CPU_NAME,_tlb_invalidate_page)
 #define cpu_set_pgd			__cpu_fn(CPU_NAME,_set_pgd)
 #define cpu_set_pmd			__cpu_fn(CPU_NAME,_set_pmd)
 #define cpu_set_pte			__cpu_fn(CPU_NAME,_set_pte)
@@ -73,10 +70,6 @@ extern void cpu_dcache_clean_entry(unsigned long address);
 extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
 extern void cpu_icache_invalidate_page(void *virt_page);
-extern void cpu_tlb_invalidate_all(void);
-extern void cpu_tlb_invalidate_range(unsigned long address, unsigned long end);
-extern void cpu_tlb_invalidate_page(unsigned long address, int flags);
 extern void cpu_set_pgd(unsigned long pgd_phys);
 extern void cpu_set_pmd(pmd_t *pmdp, pmd_t pmd);
 extern void cpu_set_pte(pte_t *ptep, pte_t pte);
......
/*
* linux/include/asm-arm/glue.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file provides the glue to stick the processor-specific bits
* into the kernel in an efficient manner. The idea is to use branches
 * when we're only targeting one class of TLB, or indirect calls
 * when we're targeting multiple classes of TLBs.
*/
#ifdef __KERNEL__
#include <linux/config.h>
#ifdef __STDC__
#define ____glue(name,fn) name##fn
#else
#define ____glue(name,fn) name/**/fn
#endif
#define __glue(name,fn) ____glue(name,fn)
/*
* Select MMU TLB handling.
*/
/*
* ARMv3 MMU
*/
#undef _TLB
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v3
# endif
#endif
/*
* ARMv4 MMU without write buffer
*/
#if defined(CONFIG_CPU_ARM720T)
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4
# endif
#endif
/*
* ARMv4 MMU with write buffer, with invalidate I TLB entry instruction
*/
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_XSCALE)
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi
# endif
#endif
/*
* ARMv4 MMU with write buffer, without invalidate I TLB entry instruction
*/
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wb
# endif
#endif
#endif
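
A quick illustration of how the glue expands when exactly one TLB class is
configured (a stand-alone sketch; the stub function here is not the real
assembly routine).  Because __glue() expands its arguments before pasting,
_TLB is first replaced by the class prefix:

#include <stdio.h>

#define ____glue(name,fn)	name##fn
#define __glue(name,fn)		____glue(name,fn)

#define _TLB v4			/* as selected for CONFIG_CPU_ARM720T */

static void v4_flush_kern_tlb_all(void)
{
	puts("v4_flush_kern_tlb_all");
}

int main(void)
{
	/* expands to v4_flush_kern_tlb_all() */
	__glue(_TLB,_flush_kern_tlb_all)();
	return 0;
}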
@@ -8,6 +8,7 @@
 * published by the Free Software Foundation.
 */
 #include <asm/mman.h>
+#include <asm/glue.h>
 /*
 * This flag is used to indicate that the page pointed to by a pte
@@ -200,69 +201,93 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa
 } while (0)
 /*
- * Old ARM MEMC stuff.  This supports the reversed mapping handling that
- * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
- */
-#define memc_update_all()		do { } while (0)
-#define memc_update_mm(mm)		do { } while (0)
-#define memc_update_addr(mm,pte,log)	do { } while (0)
-#define memc_clear(mm,physaddr)		do { } while (0)
-/*
- * TLB flushing.
+ * TLB Management
+ * ==============
+ *
+ * The arch/arm/mm/tlb-*.S files implement these methods.
+ *
+ * The TLB specific code is expected to perform whatever tests it
+ * needs to determine if it should invalidate the TLB for each
+ * call.  Start addresses are inclusive and end addresses are
+ * exclusive; it is safe to round these addresses down.
+ *
+ *	flush_tlb_all()
+ *
+ *		Invalidate the entire TLB.
 *
- * - flush_tlb_all()			flushes all processes TLBs
- * - flush_tlb_mm(mm)			flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr)	flushes TLB for specified page
- * - flush_tlb_range(vma, start, end)	flushes TLB for specified range of pages
+ *	flush_tlb_mm(mm)
 *
- * We drain the write buffer in here to ensure that the page tables in ram
- * are really up to date.  It is more efficient to do this here...
+ *		Invalidate all TLB entries in a particular address
+ *		space.
+ *		- mm	- mm_struct describing address space
+ *
+ *	flush_tlb_range(mm,start,end)
+ *
+ *		Invalidate a range of TLB entries in the specified
+ *		address space.
+ *		- mm	- mm_struct describing address space
+ *		- start	- start address (may not be aligned)
+ *		- end	- end address (exclusive, may not be aligned)
+ *
+ *	flush_tlb_page(vaddr,vma)
+ *
+ *		Invalidate the specified page in the specified address range.
+ *		- vaddr	- virtual address (may not be aligned)
+ *		- vma	- vma_struct describing address range
+ *
+ *	flush_kern_tlb_page(kaddr)
+ *
+ *		Invalidate the TLB entry for the specified page.  The address
+ *		will be in the kernels virtual memory space.  Current uses
+ *		only require the D-TLB to be invalidated.
+ *		- kaddr	- Kernel virtual memory address
 */
-/*
- * Notes:
- *  current->active_mm is the currently active memory description.
- *  current->mm == NULL iff we are lazy.
- */
-#define flush_tlb_all()						\
-	do {							\
-		cpu_tlb_invalidate_all();			\
-	} while (0)
+struct cpu_tlb_fns {
+	void (*flush_kern_all)(void);
+	void (*flush_user_mm)(struct mm_struct *);
+	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
+	void (*flush_user_page)(unsigned long, struct vm_area_struct *);
+	void (*flush_kern_page)(unsigned long);
+};
 /*
- * Flush all user virtual address space translations described by `_mm'.
- *
- * Currently, this is always called for current->mm, which should be
- * the same as current->active_mm.  This is currently not be called for
- * the lazy TLB case.
+ * Convert calls to our calling convention.
 */
-#define flush_tlb_mm(_mm)					\
-	do {							\
-		if ((_mm) == current->active_mm)		\
-			cpu_tlb_invalidate_all();		\
-	} while (0)
+#define flush_tlb_all()			__cpu_flush_kern_tlb_all()
+#define flush_tlb_mm(mm)		__cpu_flush_user_tlb_mm(mm)
+#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_page(vma,vaddr)	__cpu_flush_user_tlb_page(vaddr,vma)
+#define flush_kern_tlb_page(kaddr)	__cpu_flush_kern_tlb_page(kaddr)
 /*
- * Flush the specified range of user virtual address space translations.
- *
- * _mm may not be current->active_mm, but may not be NULL.
+ * Now select the calling method
 */
-#define flush_tlb_range(_vma,_start,_end)			\
-	do {							\
-		if ((_mm)->vm_mm == current->active_mm)		\
-			cpu_tlb_invalidate_range((_start), (_end)); \
-	} while (0)
+#ifdef MULTI_TLB
-/*
- * Flush the specified user virtual address space translation.
- */
-#define flush_tlb_page(_vma,_page)				\
-	do {							\
-		if ((_vma)->vm_mm == current->active_mm)	\
-			cpu_tlb_invalidate_page((_page),	\
-				((_vma)->vm_flags & VM_EXEC));	\
-	} while (0)
+extern struct cpu_tlb_fns cpu_tlb;
+#define __cpu_flush_kern_tlb_all	cpu_tlb.flush_kern_all
+#define __cpu_flush_user_tlb_mm		cpu_tlb.flush_user_mm
+#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
+#define __cpu_flush_user_tlb_page	cpu_tlb.flush_user_page
+#define __cpu_flush_kern_tlb_page	cpu_tlb.flush_kern_page
+#else
+#define __cpu_flush_kern_tlb_all	__glue(_TLB,_flush_kern_tlb_all)
+#define __cpu_flush_user_tlb_mm		__glue(_TLB,_flush_user_tlb_mm)
+#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
+#define __cpu_flush_user_tlb_page	__glue(_TLB,_flush_user_tlb_page)
+#define __cpu_flush_kern_tlb_page	__glue(_TLB,_flush_kern_tlb_page)
+extern void __cpu_flush_kern_tlb_all(void);
+extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_page(unsigned long);
+#endif
 /*
 * if PG_dcache_dirty is set for the page, we need to ensure that any
@@ -270,3 +295,12 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa
 * back to the page.
 */
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+/*
+ * Old ARM MEMC stuff.  This supports the reversed mapping handling that
+ * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
+ */
+#define memc_update_all()		do { } while (0)
+#define memc_update_mm(mm)		do { } while (0)
+#define memc_update_addr(mm,pte,log)	do { } while (0)
+#define memc_clear(mm,physaddr)		do { } while (0)
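
For callers, the contract documented above means no mm filtering at the
call site: the per-class implementation checks current->active_mm itself.
A hedged sketch of generic code using the interface (the types and the
unmap_pages() caller are illustrative stand-ins, not kernel code):

struct mm_struct { int dummy; };
struct vm_area_struct { struct mm_struct *vm_mm; };

/* Stub standing in for the per-class entry selected via _TLB/MULTI_TLB: */
static void __cpu_flush_user_tlb_range(unsigned long start, unsigned long end,
				       struct vm_area_struct *vma)
{
	/* real versions return early unless vma->vm_mm == current->active_mm;
	 * start is inclusive, end exclusive, and both may be rounded down */
	(void)start; (void)end; (void)vma;
}

#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)

static void unmap_pages(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	/* ...clear the page table entries covering [start, end)... */
	flush_tlb_range(vma, start, end);	/* then drop stale TLB entries */
}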
@@ -14,6 +14,9 @@
 #include <asm/proc-fns.h>
+struct cpu_tlb_fns;
+struct processor;
 struct proc_info_item {
 	const char		*manufacturer;
 	const char		*cpu_name;
@@ -37,15 +40,14 @@ struct proc_info_list {
 	const char		*elf_name;
 	unsigned int		elf_hwcap;
 	struct proc_info_item	*info;
-#ifdef MULTI_CPU
 	struct processor	*proc;
-#else
-	void			*unused;
-#endif
+	struct cpu_tlb_fns	*tlb;
 };
 #endif	/* __ASSEMBLY__ */
+#define PROC_INFO_SZ	40
 #define HWCAP_SWP	1
 #define HWCAP_HALF	2
 #define HWCAP_THUMB	4
......