Commit bf3abfa9 authored by Russell King

Update ARM tree for 2.5.6

parent c666961d
@@ -181,7 +181,7 @@ static void adjust_pte(struct vm_area_struct *vma, unsigned long address)
 static void
 make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
 {
-	struct vm_area_struct *mpnt;
+	struct list_head *l;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
 	int aliases = 0;
@@ -191,10 +191,12 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
 	 * space, then we need to handle them specially to maintain
 	 * cache coherency.
 	 */
-	for (mpnt = page->mapping->i_mmap_shared; mpnt;
-	     mpnt = mpnt->vm_next_share) {
+	list_for_each(l, &page->mapping->i_mmap_shared) {
+		struct vm_area_struct *mpnt;
 		unsigned long off;
 
+		mpnt = list_entry(l, struct vm_area_struct, shared);
+
 		/*
 		 * If this VMA is not in our MM, we can ignore it.
 		 * Note that we intentionally don't mask out the VMA
...
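The loop rewrite above tracks the 2.5 VM change that turned i_mmap_shared from a vm_next_share-chained list into a struct list_head. For reference, here is a minimal, self-contained sketch of the list_for_each()/list_entry() idiom the new code uses; the list macros are re-declared locally so it builds outside the kernel tree, and struct vma_demo is a hypothetical stand-in for struct vm_area_struct with its embedded 'shared' node.

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Insert 'new' right after 'head' (kernel-style list_add). */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Walk every node between head->next and head. */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Recover the enclosing structure from a pointer to its embedded node. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-in for struct vm_area_struct. */
struct vma_demo {
	unsigned long vm_start;
	struct list_head shared;	/* linked into mapping->i_mmap_shared */
};

int main(void)
{
	struct list_head i_mmap_shared = LIST_HEAD_INIT(i_mmap_shared);
	struct vma_demo a = { .vm_start = 0x8000 };
	struct vma_demo b = { .vm_start = 0x40000000 };
	struct list_head *l;

	list_add(&a.shared, &i_mmap_shared);
	list_add(&b.shared, &i_mmap_shared);

	/* Same shape as the rewritten loop in make_coherent() above. */
	list_for_each(l, &i_mmap_shared) {
		struct vma_demo *mpnt = list_entry(l, struct vma_demo, shared);
		printf("shared vma at 0x%lx\n", mpnt->vm_start);
	}
	return 0;
}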
 /*
  * linux/include/asm-arm/proc-armv/cache.h
  *
- * Copyright (C) 1999-2001 Russell King
+ * Copyright (C) 1999-2002 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -134,7 +134,8 @@ static __inline__ void flush_page_to_ram(struct page *page)
 #define clean_dcache_range(_s,_e)	cpu_dcache_clean_range((_s),(_e))
 #define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)
 
-#define mapping_mapped(map)	((map)->i_mmap || (map)->i_mmap_shared)
+#define mapping_mapped(map)	(!list_empty(&(map)->i_mmap) || \
+				 !list_empty(&(map)->i_mmap_shared))
 
 /*
  * flush_dcache_page is used when the kernel has written to the page
@@ -204,7 +205,7 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa
  * TLB Management
  * ==============
  *
- * The arch/arm/mm/tlb-*.S files implement this methods.
+ * The arch/arm/mm/tlb-*.S files implement these methods.
  *
  * The TLB specific code is expected to perform whatever tests it
  * needs to determine if it should invalidate the TLB for each
...
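The mapping_mapped() hunk is the same list_head conversion seen from the other side: an empty list_head points back at itself rather than being NULL, so "does anything map this file?" becomes a pair of !list_empty() tests instead of pointer checks. A small sketch under the same assumptions, with struct mapping_demo as a hypothetical stand-in for the address_space-style mapping structure:

#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* An empty list_head points back at itself. */
static inline bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Hypothetical stand-in for the mapping structure in the diff. */
struct mapping_demo {
	struct list_head i_mmap;	/* private mappings */
	struct list_head i_mmap_shared;	/* shared mappings */
};

/* Function form of the new mapping_mapped() macro above. */
static bool mapping_mapped_demo(const struct mapping_demo *map)
{
	return !list_empty(&map->i_mmap) ||
	       !list_empty(&map->i_mmap_shared);
}

int main(void)
{
	struct mapping_demo m = {
		.i_mmap		= LIST_HEAD_INIT(m.i_mmap),
		.i_mmap_shared	= LIST_HEAD_INIT(m.i_mmap_shared),
	};

	/* Both lists empty: nothing maps this file. */
	printf("mapped: %s\n", mapping_mapped_demo(&m) ? "yes" : "no");
	return 0;
}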