Commit d37d9068 authored by Russell King

[ARM] Part 2 in the cache API changes.

This is the new API; we now have methods for handling DMA which are
separate from those handling the TLB consistency issues, which are
in turn separate from the methods handling the cache coherency
issues.

Implementations are, however, free to alias these methods internally.
parent 10eacf17
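The shape of the new cache interface, pulled together from the hunks that follow, is roughly this (an illustrative sketch using the names introduced by this commit, not the literal header text):

/* per-CPU cache methods, selected through the CPU's proc_info_list entry */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int vm_flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_page)(void *page);
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};

#ifdef MULTI_CACHE
/* several cache types in one kernel: setup_processor() copies the table
 * from the matched proc_info_list entry into this global at boot */
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all	cpu_cache.flush_kern_all
#else
/* single cache type: the name glues straight to, e.g.,
 * v4wb_flush_kern_cache_all at compile time */
#define __cpuc_flush_kern_all	__glue(_CACHE,_flush_kern_cache_all)
#endif

#define flush_cache_all()	__cpuc_flush_kern_all()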
@@ -74,6 +74,9 @@ struct cpu_tlb_fns cpu_tlb;
 #ifdef MULTI_USER
 struct cpu_user_fns cpu_user;
 #endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache;
+#endif
 
 unsigned char aux_device_present;
 char elf_platform[ELF_PLATFORM_SIZE];
@@ -282,6 +285,9 @@ static void __init setup_processor(void)
 #ifdef MULTI_USER
 cpu_user = *list->user;
 #endif
+#ifdef MULTI_CACHE
+cpu_cache = *list->cache;
+#endif
 
 printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
 cpu_name, processor_id, (int)processor_id & 15,
......
@@ -20,16 +20,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 p-$(CONFIG_CPU_26) += proc-arm2_3.o
 
 # ARMv3
-p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o copypage-v3.o
-p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
 
 # ARMv4
-p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4wt.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4wb.o abort-ev4.o minicache.o
-p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o cache-v4.o copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o cache-v4wb.o copypage-v4wb.o abort-ev4.o
+p-$(CONFIG_CPU_SA1100) += proc-sa1100.o tlb-v4wb.o cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
 
 # ARMv5
 p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
......
/*
* linux/arch/arm/mm/cache-v3.S
*
* Copyright (C) 1997-2002 Russell king
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v3_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v3_flush_kern_cache_all)
/* FALLTHROUGH */
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v3_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v3_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_clean_range)
mov pc, lr
ENTRY(v3_cache_fns)
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_flush_kern_dcache_page
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4.S
*
* Copyright (C) 1997-2002 Russell king
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v4_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4_flush_kern_cache_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vma)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_clean_range)
mov pc, lr
ENTRY(v4_cache_fns)
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_flush_kern_dcache_page
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wb.S
*
* Copyright (C) 1997-2002 Russell king
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The total size of the data cache.
*/
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE 16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE 8192
#else
# error Unknown cache size
#endif
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintainence instructions.
*
* Size Clean (ticks) Dirty (ticks)
* 4096 21 20 21 53 55 54
* 8192 40 41 40 106 100 102
* 16384 77 77 76 140 140 138
* 32768 150 149 150 214 216 212 <---
* 65536 296 297 296 351 358 361
* 131072 591 591 591 656 657 651
* Whole 132 136 132 221 217 207 <---
*/
#define CACHE_DLIMIT (CACHE_DSIZE * 4)
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wb_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
mov r0, #FLUSH_BASE
add r1, r0, #CACHE_DSIZE
1: ldr r2, [r0], #32
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4wb_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
cmp r3, #CACHE_DLIMIT @ total size >= limit?
bhs __flush_whole_cache @ flush whole D cache
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wb_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
/* fall through */
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* This is actually the same as v4wb_coherent_kern_range()
*/
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_flush_kern_dcache_page
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
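The dma_inv_range entry above is the subtle one: a DMA buffer may share its first and last cache lines with unrelated live data, so any partially covered line is cleaned (written back) before the range is invalidated. A small user-space C model of the v4wb routine, with hypothetical print-only stand-ins for the coprocessor operations:

#include <stdio.h>

#define CACHE_DLINESIZE 32

/* stand-ins for the MCR cache instructions; they only log what the
 * real operations would touch */
static void clean_dcache_line(unsigned long addr)      { printf("clean      %#lx\n", addr); }
static void invalidate_dcache_line(unsigned long addr) { printf("invalidate %#lx\n", addr); }
static void drain_write_buffer(void)                   { printf("drain write buffer\n"); }

/* C model of v4wb_dma_inv_range: write back the partially covered lines
 * at either end, then invalidate the whole range line by line */
static void dma_inv_range(unsigned long start, unsigned long end)
{
	if (start & (CACHE_DLINESIZE - 1))
		clean_dcache_line(start & ~(unsigned long)(CACHE_DLINESIZE - 1));
	if (end & (CACHE_DLINESIZE - 1))
		clean_dcache_line(end & ~(unsigned long)(CACHE_DLINESIZE - 1));

	for (start &= ~(unsigned long)(CACHE_DLINESIZE - 1); start < end;
	     start += CACHE_DLINESIZE)
		invalidate_dcache_line(start);

	drain_write_buffer();
}

int main(void)
{
	dma_inv_range(0x1010, 0x10f0);	/* deliberately unaligned ends */
	return 0;
}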
/*
* linux/arch/arm/mm/cache-v4wt.S
*
* Copyright (C) 1997-2002 Russell king
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARMv4 write through cache operations support.
*
* We assume that the write buffer is not enabled.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 8
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintainence instructions.
*
* *** This needs benchmarking
*/
#define CACHE_DLIMIT 16384
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wt_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Clean and invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wt_flush_kern_dcache_page)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, #PAGE_SZ
/* fallthrough */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_flush_kern_dcache_page
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
@@ -184,9 +184,8 @@ void __flush_dcache_page(struct page *page)
 {
 struct mm_struct *mm = current->active_mm;
 struct list_head *l;
-unsigned long kaddr = (unsigned long)page_address(page);
 
-cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+__cpuc_flush_dcache_page(page_address(page));
 
 if (!page->mapping)
 return;
@@ -291,10 +290,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 page = pfn_to_page(pfn);
 if (page->mapping) {
 int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-unsigned long kaddr = (unsigned long)page_address(page);
 
 if (dirty)
-cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+__cpuc_flush_dcache_page(page_address(page));
 
 make_coherent(vma, addr, page, dirty);
 }
......
This diff is collapsed.
@@ -37,35 +37,6 @@
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
 
-/*
- * Function: arm720_cache_clean_invalidate_all (void)
- * : arm720_cache_clean_invalidate_page (unsigned long address, int size,
- * int flags)
- *
- * Params : address Area start address
- * : size size of area
- * : flags b0 = I cache as well
- *
- * Purpose : Flush all cache lines
- */
-ENTRY(cpu_arm720_cache_clean_invalidate_all)
-ENTRY(cpu_arm720_cache_clean_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_page)
-ENTRY(cpu_arm720_dcache_invalidate_range)
-mov r0, #0
-mcr p15, 0, r0, c7, c7, 0 @ flush cache
-mov pc, lr
-
-/*
- * These just expect cache lines to be cleaned. Since we have a writethrough
- * cache, we never have any dirty cachelines to worry about.
- */
-ENTRY(cpu_arm720_dcache_clean_range)
-ENTRY(cpu_arm720_dcache_clean_page)
-ENTRY(cpu_arm720_dcache_clean_entry)
-mov pc, lr
-
 /*
  * Function: arm720_check_bugs (void)
  * : arm720_proc_init (void)
@@ -79,6 +50,7 @@ ENTRY(cpu_arm720_check_bugs)
 msr cpsr, ip
 mov pc, lr
 
+ENTRY(cpu_arm720_dcache_clean_area)
 ENTRY(cpu_arm720_proc_init)
 mov pc, lr
@@ -130,7 +102,7 @@ ENTRY(cpu_arm720_set_pte)
 bic r2, r2, #3
 orr r2, r2, #HPTE_TYPE_SMALL
-tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+tst r1, #LPTE_USER @ User?
 orrne r2, r2, #HPTE_AP_READ
 tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -189,25 +161,9 @@ ENTRY(arm720_processor_functions)
 .word cpu_arm720_proc_fin
 .word cpu_arm720_reset
 .word cpu_arm720_do_idle
-
-/* cache */
-.word cpu_arm720_cache_clean_invalidate_all
-.word cpu_arm720_cache_clean_invalidate_range
-
-/* dcache */
-.word cpu_arm720_dcache_invalidate_range
-.word cpu_arm720_dcache_clean_range
-.word cpu_arm720_dcache_clean_page
-.word cpu_arm720_dcache_clean_entry
-
-/* icache */
-.word cpu_arm720_icache_invalidate_range
-.word cpu_arm720_icache_invalidate_page
-
-/* pgtable */
+.word cpu_arm720_dcache_clean_area
 .word cpu_arm720_set_pgd
 .word cpu_arm720_set_pte
 
 .size arm720_processor_functions, . - arm720_processor_functions
 
 .type cpu_arch_name, #object
@@ -238,4 +194,5 @@ __arm720_proc_info:
 .long arm720_processor_functions
 .long v4_tlb_fns
 .long v4wt_user_fns
+.long v4_cache_fns
 .size __arm720_proc_info, . - __arm720_proc_info
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* linux/arch/arm/mm/proc-sa110.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for SA110
*
* These are the low level assembler for performing cache and TLB
* functions on the StrongARM-1100 and StrongARM-1110.
*
* Note that SA1100 and SA1110 share everything but their name and CPU ID.
*
* 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
* Flush the read buffer at context switches
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
#define FLUSH_OFFSET 32768
.macro flush_1100_dcache rd, ra, re
ldr \rd, =flush_base
ldr \ra, [\rd]
eor \ra, \ra, #FLUSH_OFFSET
str \ra, [\rd]
add \re, \ra, #8192 @ only necessary for 8k
1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1001b
#ifdef FLUSH_BASE_MINICACHE
add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
add \re, \ra, #512 @ only 512 bytes
1002: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1002b
#endif
.endm
.data
flush_base:
.long FLUSH_BASE
.text
/*
* cpu_sa1100_check_bugs()
*/
ENTRY(cpu_sa1100_check_bugs)
mrs ip, cpsr
bic ip, ip, #PSR_F_BIT
msr cpsr, ip
mov pc, lr
__INIT
/*
* cpu_sa1100_proc_init()
*/
ENTRY(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
.previous
/*
* cpu_sa1100_proc_fin()
*
* Prepare the CPU for reset:
* - Disable interrupts
* - Clean and turn off caches.
*/
ENTRY(cpu_sa1100_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
flush_1100_dcache r0, r1, r2 @ clean caches
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_sa1100_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_sa1100_do_idle(type)
*
* Cause the processor to idle
*
* type: call type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*/
.align 5
ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, #0
ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
@ --- aligned to a cache line
mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
ldr r1, [r1, #0] @ force switch to MCLK
mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
mov pc, lr
/* ================================= CACHE ================================ */
/*
* cpu_sa1100_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_sa1100_set_pgd(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_set_pgd)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_sa1100_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa1100_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User or Exec?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
str r2, [r0] @ hardware version
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align
__INIT
__sa1100_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mcr p15, 0, r4, c2, c0 @ load page table pointer
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ .... 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
mov pc, lr
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_check_bugs
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
.word cpu_sa1100_dcache_clean_area
.word cpu_sa1100_set_pgd
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
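The flush_1100_dcache macro above illustrates a StrongARM quirk: there is no coprocessor operation to clean the whole D-cache, so dirty lines are evicted by reading one cache-size worth of a reserved mapping (FLUSH_BASE), alternating between two windows so that every pass misses. A rough user-space model of the idea (in the real code FLUSH_OFFSET is 32768 and FLUSH_BASE is a fixed kernel mapping from asm/hardware.h; a static buffer stands in for it here):

#include <stdio.h>

#define DCACHELINESIZE 32
#define DCACHE_SIZE 8192               /* SA-1100 main D-cache              */
#define WINDOW_TOGGLE DCACHE_SIZE      /* distance between the two windows  */

static volatile unsigned char flush_area[2 * DCACHE_SIZE];  /* stand-in for FLUSH_BASE */
static unsigned long flush_base;       /* offset of the current window      */

/* Evict every D-cache line by reading a cache-sized region; toggling the
 * window guarantees the reads miss and displace whatever was cached. */
static void flush_whole_dcache(void)
{
	unsigned long addr;

	flush_base ^= WINDOW_TOGGLE;   /* switch windows, as the .S macro does */
	for (addr = flush_base; addr < flush_base + DCACHE_SIZE; addr += DCACHELINESIZE)
		(void)flush_area[addr];    /* each load displaces one cache line */
}

int main(void)
{
	flush_whole_dcache();
	flush_whole_dcache();
	printf("next window starts at offset %lu\n", flush_base);
	return 0;
}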
This diff is collapsed.
@@ -46,85 +46,31 @@ extern struct processor {
 /*
  * Processor architecture specific
  */
-struct { /* CACHE */
-/*
- * flush all caches
- */
-void (*clean_invalidate_all)(void);
-/*
- * flush a specific page or pages
- */
-void (*clean_invalidate_range)(unsigned long address, unsigned long end, int flags);
-} cache;
-
-struct { /* D-cache */
-/*
- * invalidate the specified data range
- */
-void (*invalidate_range)(unsigned long start, unsigned long end);
-/*
- * clean specified data range
- */
-void (*clean_range)(unsigned long start, unsigned long end);
-/*
- * obsolete flush cache entry
- */
-void (*clean_page)(void *virt_page);
-/*
- * clean a virtual address range from the
- * D-cache without flushing the cache.
- */
-void (*clean_entry)(unsigned long start);
-} dcache;
-
-struct { /* I-cache */
-/*
- * invalidate the I-cache for the specified range
- */
-void (*invalidate_range)(unsigned long start, unsigned long end);
-/*
- * invalidate the I-cache for the specified virtual page
- */
-void (*invalidate_page)(void *virt_page);
-} icache;
-
-struct { /* PageTable */
-/*
- * Set the page table
- */
-void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm);
-/*
- * Set a PTE
- */
-void (*set_pte)(pte_t *ptep, pte_t pte);
-} pgtable;
+/*
+ * clean a virtual address range from the
+ * D-cache without flushing the cache.
+ */
+void (*dcache_clean_area)(void *addr, int size);
+
+/*
+ * Set the page table
+ */
+void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+
+/*
+ * Set a PTE
+ */
+void (*set_pte)(pte_t *ptep, pte_t pte);
 } processor;
 
-extern const struct processor arm6_processor_functions;
-extern const struct processor arm7_processor_functions;
-extern const struct processor sa110_processor_functions;
-
 #define cpu_check_bugs() processor._check_bugs()
 #define cpu_proc_init() processor._proc_init()
 #define cpu_proc_fin() processor._proc_fin()
 #define cpu_reset(addr) processor.reset(addr)
 #define cpu_do_idle() processor._do_idle()
-#define cpu_cache_clean_invalidate_all() processor.cache.clean_invalidate_all()
-#define cpu_cache_clean_invalidate_range(s,e,f) processor.cache.clean_invalidate_range(s,e,f)
-#define cpu_dcache_clean_page(vp) processor.dcache.clean_page(vp)
-#define cpu_dcache_clean_entry(addr) processor.dcache.clean_entry(addr)
-#define cpu_dcache_clean_range(s,e) processor.dcache.clean_range(s,e)
-#define cpu_dcache_invalidate_range(s,e) processor.dcache.invalidate_range(s,e)
-#define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e)
-#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
-#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm)
-#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
+#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte)
 
-#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm)
+#define cpu_switch_mm(pgd,mm) processor.switch_mm(__virt_to_phys((unsigned long)(pgd)),mm)
 
 #define cpu_get_pgd() \
 ({ \
......
@@ -27,14 +27,7 @@
 #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
 #define cpu_reset __cpu_fn(CPU_NAME,_reset)
 #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
-#define cpu_cache_clean_invalidate_all __cpu_fn(CPU_NAME,_cache_clean_invalidate_all)
-#define cpu_cache_clean_invalidate_range __cpu_fn(CPU_NAME,_cache_clean_invalidate_range)
-#define cpu_dcache_invalidate_range __cpu_fn(CPU_NAME,_dcache_invalidate_range)
-#define cpu_dcache_clean_range __cpu_fn(CPU_NAME,_dcache_clean_range)
-#define cpu_dcache_clean_page __cpu_fn(CPU_NAME,_dcache_clean_page)
-#define cpu_dcache_clean_entry __cpu_fn(CPU_NAME,_dcache_clean_entry)
-#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
-#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
+#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
 #define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
 #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
@@ -46,23 +39,11 @@
 struct mm_struct;
 
 /* declare all the functions as extern */
-extern void cpu_data_abort(unsigned long pc);
 extern void cpu_check_bugs(void);
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
-extern void cpu_cache_clean_invalidate_all(void);
-extern void cpu_cache_clean_invalidate_range(unsigned long address, unsigned long end, int flags);
-extern void cpu_dcache_invalidate_range(unsigned long start, unsigned long end);
-extern void cpu_dcache_clean_range(unsigned long start, unsigned long end);
-extern void cpu_dcache_clean_page(void *virt_page);
-extern void cpu_dcache_clean_entry(unsigned long address);
-extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
-extern void cpu_icache_invalidate_page(void *virt_page);
+extern void cpu_dcache_clean_area(void *, int);
 extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_set_pte(pte_t *ptep, pte_t pte);
......
@@ -10,6 +10,62 @@
 #include <asm/mman.h>
 #include <asm/glue.h>
 
+/*
+ * Cache Model
+ * ===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM720T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM1020)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xscale
+# endif
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintainence model
+#endif
+
 /*
  * This flag is used to indicate that the page pointed to by a pte
  * is dirty and requires cleaning before returning it to the user.
@@ -17,16 +73,94 @@
 #define PG_dcache_dirty PG_arch_1
 
 /*
- * Cache handling for 32-bit ARM processors.
- *
- * Note that on ARM, we have a more accurate specification than that
- * Linux's "flush". We therefore do not use "flush" here, but instead
- * use:
- *
- * clean: the act of pushing dirty cache entries out to memory.
- * invalidate: the act of discarding data held within the cache,
- * whether it is dirty or not.
- */
+ * MM Cache Management
+ * ===================
+ *
+ * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
+ * implement these methods.
+ *
+ * Start addresses are inclusive and end addresses are exclusive;
+ * start addresses should be rounded down, end addresses up.
+ *
+ * See linux/Documentation/cachetlb.txt for more information.
+ * Please note that the implementation of these, and the required
+ * effects are cache-type (VIVT/VIPT/PIPT) specific.
+ *
+ * flush_cache_kern_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
+ *
+ * flush_cache_user_mm(mm)
+ *
+ * Clean and invalidate all user space cache entries
+ * before a change of page tables.
+ *
+ * flush_cache_user_range(start, end, flags)
+ *
+ * Clean and invalidate a range of cache entries in the
+ * specified address space before a change of page tables.
+ * - start - user start address (inclusive, page aligned)
+ * - end - user end address (exclusive, page aligned)
+ * - flags - vma->vm_flags field
+ *
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * DMA Cache Coherency
+ * ===================
+ *
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+
+struct cpu_cache_fns {
+void (*flush_kern_all)(void);
+void (*flush_user_all)(void);
+void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
+void (*coherent_kern_range)(unsigned long, unsigned long);
+void (*flush_kern_dcache_page)(void *);
+void (*dma_inv_range)(unsigned long, unsigned long);
+void (*dma_clean_range)(unsigned long, unsigned long);
+void (*dma_flush_range)(unsigned long, unsigned long);
+};
+
+/*
+ * Select the calling method
+ */
+#ifdef MULTI_CACHE
+
+extern struct cpu_cache_fns cpu_cache;
+
+#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
+#define __cpuc_flush_user_all cpu_cache.flush_user_all
+#define __cpuc_flush_user_range cpu_cache.flush_user_range
+#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
+#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
 
 /*
  * These are private to the dma-mapping API. Do not use directly.
@@ -34,27 +168,57 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range cpu_dcache_invalidate_range
-#define dmac_clean_range cpu_dcache_clean_range
-#define dmac_flush_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0)
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
+#define dmac_flush_range cpu_cache.dma_flush_range
+
+#else
+
+#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
+#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
+
+extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_user_all(void);
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_flush_dcache_page(void *);
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
+#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+
+extern void dmac_inv_range(unsigned long, unsigned long);
+extern void dmac_clean_range(unsigned long, unsigned long);
+extern void dmac_flush_range(unsigned long, unsigned long);
+
+#endif
 
 /*
  * Convert calls to our calling convention.
  */
-#define flush_cache_all() cpu_cache_clean_invalidate_all()
+#define flush_cache_all() __cpuc_flush_kern_all()
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 if (current->active_mm == mm)
-cpu_cache_clean_invalidate_all();
+__cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 if (current->active_mm == vma->vm_mm)
-cpu_cache_clean_invalidate_range(start & PAGE_MASK,
-PAGE_ALIGN(end), vma->vm_flags);
+__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+vma->vm_flags);
 }
 
 static inline void
@@ -62,8 +226,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
 {
 if (current->active_mm == vma->vm_mm) {
 unsigned long addr = user_addr & PAGE_MASK;
 
-cpu_cache_clean_invalidate_range(addr, addr + PAGE_SIZE,
-vma->vm_flags & VM_EXEC);
+__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 }
 }
@@ -71,15 +234,13 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
  * Perform necessary cache operations to ensure that data previously
  * stored within this range of addresses can be executed by the CPU.
  */
-#define flush_icache_range(s,e) cpu_icache_invalidate_range(s,e)
+#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
 
 /*
  * Perform necessary cache operations to ensure that the TLB will
  * see data written in the specified area.
  */
-#define clean_dcache_area(start,size) \
-cpu_cache_clean_invalidate_range((unsigned long)start, \
-((unsigned long)start) + size, 0);
+#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
 
 /*
  * flush_dcache_page is used when the kernel has written to the page
......
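From the caller's side nothing needs to know which implementation is in play. A hypothetical fragment (not part of this commit) showing the intended use of the renamed entry point, which now lands on the per-CPU coherent_kern_range method or its compile-time glued variant:

#include <string.h>

/* After writing instructions into memory, make them visible to the
 * I-cache before running them; flush_icache_range() expands to
 * __cpuc_coherent_kern_range as defined in the header above. */
static void install_code(void *dst, const void *insns, size_t len)
{
	memcpy(dst, insns, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}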
@@ -14,6 +14,7 @@
 struct cpu_tlb_fns;
 struct cpu_user_fns;
+struct cpu_cache_fns;
 struct processor;
 
 /*
@@ -37,13 +38,14 @@ struct proc_info_list {
 struct processor *proc;
 struct cpu_tlb_fns *tlb;
 struct cpu_user_fns *user;
+struct cpu_cache_fns *cache;
 };
 
 extern unsigned int elf_hwcap;
 
 #endif /* __ASSEMBLY__ */
 
-#define PROC_INFO_SZ 44
+#define PROC_INFO_SZ 48
 
 #define HWCAP_SWP 1
 #define HWCAP_HALF 2
......