Commit 2b7df078 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/flush_cache_page-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 61f80950 28ff874f
@@ -155,7 +155,7 @@ the sequence will be in one of the following forms:
 	change_range_of_page_tables(mm, start, end);
 	flush_tlb_range(vma, start, end);
-3) flush_cache_page(vma, addr);
+3) flush_cache_page(vma, addr, pfn);
    set_pte(pte_pointer, new_pte_val);
    flush_tlb_page(vma, addr);
@@ -203,7 +203,7 @@ Here are the routines, one by one:
 	call flush_cache_page (see below) for each entry which may be
 	modified.
-3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
+3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 	This time we need to remove a PAGE_SIZE sized range
 	from the cache.  The 'vma' is the backing structure used by
@@ -213,8 +213,14 @@ Here are the routines, one by one:
 	executable (and thus could be in the 'instruction cache' in
 	"Harvard" type cache layouts).
+	The 'pfn' indicates the physical page frame (shift this value
+	left by PAGE_SHIFT to get the physical address) that 'addr'
+	translates to.  It is this mapping which should be removed from
+	the cache.
 	After running, there will be no entries in the cache for
-	'vma->vm_mm' for virtual address 'addr'.
+	'vma->vm_mm' for virtual address 'addr' which translates
+	to 'pfn'.
 	This is used primarily during fault processing.
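Under the new contract the caller, not the implementation, supplies the
physical side of the mapping.  A minimal sketch of both sides, not taken
from this patch (hypothetical code; 'page' is assumed to be the struct
page backing the mapping, and '__flush_dcache_alias' /
'__flush_icache_page' stand in for arch-specific primitives):

	/* Caller side: derive the pfn from the struct page, or with
	 * pte_pfn() when a page-table walk is already in hand. */
	flush_cache_page(vma, addr, page_to_pfn(page));

	/* Implementation side: the pfn replaces the old page-table
	 * walk wherever the physical address is needed. */
	void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
			      unsigned long pfn)
	{
		unsigned long phys = pfn << PAGE_SHIFT;	/* frame -> physical */

		__flush_dcache_alias(addr & PAGE_MASK, phys);
		if (vma->vm_flags & VM_EXEC)
			__flush_icache_page(addr & PAGE_MASK);
	}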
...
@@ -54,7 +54,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
-		flush_cache_page(vma, address);
+		flush_cache_page(vma, address, pte_pfn(entry));
 		pte_val(entry) &= ~shared_pte_mask;
 		set_pte(pte, entry);
 		flush_tlb_page(vma, address);
@@ -115,7 +115,7 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	if (aliases)
 		adjust_pte(vma, addr);
 	else
-		flush_cache_page(vma, addr);
+		flush_cache_page(vma, addr, page_to_pfn(page));
 }
 /*
...
@@ -56,7 +56,7 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		flush_cache_page(mpnt, mpnt->vm_start + offset);
+		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
 		if (cache_is_vipt())
 			break;
 	}
...
@@ -254,8 +254,7 @@ static void r3k_flush_cache_range(struct vm_area_struct *vma,
 {
 }
-static void r3k_flush_cache_page(struct vm_area_struct *vma,
-				 unsigned long page)
+static void r3k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
 {
 }
...
@@ -426,8 +426,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 	}
 }
-static void r4k_flush_cache_page(struct vm_area_struct *vma,
-				 unsigned long page)
+static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
 {
 	struct flush_cache_page_args args;
...
@@ -160,8 +160,7 @@ static inline void __sb1_flush_icache_all(void)
  * dcache first, then invalidate the icache.  If the page isn't
  * executable, nothing is required.
  */
-static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
-	unsigned long addr)
+static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
 	int cpu = smp_processor_id();
@@ -183,17 +182,18 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
 struct flush_cache_page_args {
 	struct vm_area_struct *vma;
 	unsigned long addr;
+	unsigned long pfn;
 };
 static void sb1_flush_cache_page_ipi(void *info)
 {
 	struct flush_cache_page_args *args = info;
-	local_sb1_flush_cache_page(args->vma, args->addr);
+	local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
 }
 /* Dirty dcache could be on another CPU, so do the IPIs */
-static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
+static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
 	struct flush_cache_page_args args;
@@ -203,10 +203,11 @@ static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
 	addr &= PAGE_MASK;
 	args.vma = vma;
 	args.addr = addr;
+	args.pfn = pfn;
 	on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
 }
 #else
-void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
+void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
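The comment above the local routine states the sb1 policy: write dirty
dcache lines back first, then invalidate the icache, and touch the
icache only for executable mappings.  In outline (a schematic sketch
with hypothetical primitive names, not the actual sb1 index-walking
code):

	static void local_flush_sketch(struct vm_area_struct *vma,
				       unsigned long addr, unsigned long pfn)
	{
		__wback_dcache_page(addr & PAGE_MASK);		/* hypothetical */
		if (vma->vm_flags & VM_EXEC)
			__inv_icache_page(addr & PAGE_MASK);	/* hypothetical */
	}

On SMP the arguments, now including 'pfn', are packed into
flush_cache_page_args and broadcast with on_each_cpu(), since the dirty
lines may be sitting in another CPU's dcache.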
...
@@ -178,8 +178,7 @@ static void tx39_flush_cache_range(struct vm_area_struct *vma,
 	}
 }
-static void tx39_flush_cache_page(struct vm_area_struct *vma,
-				  unsigned long page)
+static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
 {
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
...
@@ -23,7 +23,7 @@ void (*__flush_cache_all)(void);
 void (*flush_cache_mm)(struct mm_struct *mm);
 void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
 void (*flush_icache_range)(unsigned long start, unsigned long end);
 void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
...
@@ -258,10 +258,16 @@ void flush_cache_mm(struct mm_struct *mm)
 		flush_cache_all();
 }
-static void __flush_cache_page(struct vm_area_struct *vma,
-			       unsigned long address,
-			       unsigned long phys)
+/*
+ * Write back and invalidate I/D-caches for the page.
+ *
+ * ADDR: Virtual Address (U0 address)
+ * PFN: Physical page number
+ */
+void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
 {
+	unsigned long phys = pfn << PAGE_SHIFT;
 	/* We only need to flush D-cache when we have alias */
 	if ((address^phys) & CACHE_ALIAS) {
 		/* Loop 4K of the D-cache */
@@ -341,32 +347,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		flush_icache_all();
 }
-/*
- * Write back and invalidate I/D-caches for the page.
- *
- * ADDR: Virtual Address (U0 address)
- */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
-{
-	pgd_t *dir;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	unsigned long phys;
-	dir = pgd_offset(vma->vm_mm, address);
-	pmd = pmd_offset(dir, address);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return;
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (!(pte_val(entry) & _PAGE_PRESENT))
-		return;
-	phys = pte_val(entry)&PTE_PHYS_MASK;
-	__flush_cache_page(vma, address, phys);
-}
 /*
  * flush_icache_user_range
  * @vma: VMA of the process
@@ -377,6 +357,6 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
 void flush_icache_user_range(struct vm_area_struct *vma,
 			     struct page *page, unsigned long addr, int len)
 {
-	__flush_cache_page(vma, addr, PHYSADDR(page_address(page)));
+	flush_cache_page(vma, addr, page_to_pfn(page));
 }
@@ -186,25 +186,9 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
 {
-	pgd_t *dir;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	unsigned long phys;
-	dir = pgd_offset(vma->vm_mm, address);
-	pmd = pmd_offset(dir, address);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return;
-	pte = pte_offset(pmd, address);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry))
-		return;
-	phys = pte_val(entry)&PTE_PHYS_MASK;
-	__flush_dcache_page(phys);
+	__flush_dcache_page(pfn << PAGE_SHIFT);
 }
 /*
...
@@ -573,31 +573,6 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
 	}
 }
-static void sh64_dcache_purge_virt_page(struct mm_struct *mm, unsigned long eaddr)
-{
-	unsigned long phys;
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	pgd = pgd_offset(mm, eaddr);
-	pmd = pmd_offset(pgd, eaddr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return;
-	pte = pte_offset_kernel(pmd, eaddr);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry))
-		return;
-	phys = pte_val(entry) & PAGE_MASK;
-	sh64_dcache_purge_phy_page(phys);
-}
 static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
 {
 	pgd_t *pgd;
@@ -904,7 +879,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 /****************************************************************************/
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
 {
 	/* Invalidate any entries in either cache for the vma within the user
 	   address space vma->vm_mm for the page starting at virtual address
@@ -915,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
 	   Note(1), this is called with mm->page_table_lock held.
 	   */
-	sh64_dcache_purge_virt_page(vma->vm_mm, eaddr);
+	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 	if (vma->vm_flags & VM_EXEC) {
 		sh64_icache_inv_user_page(vma, eaddr);
...
@@ -1003,8 +1003,7 @@ extern void viking_flush_cache_all(void);
 extern void viking_flush_cache_mm(struct mm_struct *mm);
 extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 				     unsigned long end);
-extern void viking_flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long page);
+extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 extern void viking_flush_page_to_ram(unsigned long page);
 extern void viking_flush_page_for_dma(unsigned long page);
 extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
...
@@ -1603,7 +1603,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 				DUMP_SEEK (file->f_pos + PAGE_SIZE);
 			} else {
 				void *kaddr;
-				flush_cache_page(vma, addr);
+				flush_cache_page(vma, addr, page_to_pfn(page));
 				kaddr = kmap(page);
 				if ((size += PAGE_SIZE) > limit ||
 				    !dump_write(file, kaddr,
...
@@ -8,7 +8,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -237,16 +237,16 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr);			\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 		flush_dcache_page(page);			\
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr);			\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
 /*
@@ -269,7 +269,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		unsigned long addr = user_addr & PAGE_MASK;
...
@@ -23,7 +23,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma,start,end)	do { } while (0)
-#define flush_cache_page(vma,vmaddr)		do { } while (0)
+#define flush_cache_page(vma,vmaddr,pfn)	do { } while (0)
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
...
@@ -10,7 +10,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -21,7 +21,7 @@
 #define flush_cache_all()			do {} while(0)
 #define flush_cache_mm(mm)			do {} while(0)
 #define flush_cache_range(mm, start, end)	do {} while(0)
-#define flush_cache_page(vma, vmaddr)		do {} while(0)
+#define flush_cache_page(vma, vmaddr, pfn)	do {} while(0)
 #define flush_cache_vmap(start, end)		do {} while(0)
 #define flush_cache_vunmap(start, end)		do {} while(0)
 #define flush_dcache_mmap_lock(mapping)		do {} while(0)
...
@@ -13,7 +13,7 @@
 #define flush_cache_all()
 #define flush_cache_mm(mm)
 #define flush_cache_range(vma,a,b)
-#define flush_cache_page(vma,p)
+#define flush_cache_page(vma,p,pfn)
 #define flush_dcache_page(page)
 #define flush_dcache_mmap_lock(mapping)
 #define flush_dcache_mmap_unlock(mapping)
...
@@ -8,7 +8,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -19,7 +19,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_icache_page(vma,page)		do { } while (0)
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
...
@@ -11,7 +11,7 @@ extern void _flush_cache_copyback_all(void);
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -31,7 +31,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -43,7 +43,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -99,8 +99,7 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
 		__flush_cache_030();
 }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr)
+static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	if (vma->vm_mm == current->mm)
 		__flush_cache_030();
@@ -134,15 +133,15 @@ static inline void __flush_page_to_ram(void *vaddr)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 	} while (0)
 extern void flush_icache_range(unsigned long address, unsigned long endaddr);
...
@@ -9,7 +9,7 @@
 #define flush_cache_all()			__flush_cache_all()
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_range(start,len)		do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
...
@@ -17,7 +17,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
- *  - flush_cache_page(mm, vmaddr) flushes a single page
+ *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *  - flush_icache_range(start, end) flush a range of instructions
  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
@@ -34,8 +34,7 @@ extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end);
-extern void (*flush_cache_page)(struct vm_area_struct *vma,
-	unsigned long page);
+extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
 extern void __flush_dcache_page(struct page *page);
 static inline void flush_dcache_page(struct page *page)
...
@@ -67,14 +67,14 @@ extern void flush_dcache_page(struct page *page);
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	flush_cache_page(vma, vaddr); \
+	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
 	memcpy(dst, src, len); \
 	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	flush_cache_page(vma, vaddr); \
+	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
 	memcpy(dst, src, len); \
 } while (0)
@@ -170,7 +170,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
 }
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	BUG_ON(!vma->vm_mm->context);
...
@@ -22,7 +22,7 @@
 #define flush_cache_all()		do { } while (0)
 #define flush_cache_mm(mm)		do { } while (0)
 #define flush_cache_range(vma, a, b)	do { } while (0)
-#define flush_cache_page(vma, p)	do { } while (0)
+#define flush_cache_page(vma, p, pfn)	do { } while (0)
 #define flush_icache_page(vma, page)	do { } while (0)
 #define flush_cache_vmap(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)	do { } while (0)
...
@@ -12,7 +12,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_icache_page(vma, page)		do { } while (0)
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
...
@@ -8,7 +8,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -15,14 +15,14 @@ extern void __flush_invalidate_region(void *start, int size);
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 	} while (0)
...
@@ -15,7 +15,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
- *  - flush_cache_page(mm, vmaddr) flushes a single page
+ *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
@@ -28,7 +28,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -15,7 +15,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
- *  - flush_cache_page(mm, vmaddr) flushes a single page
+ *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
@@ -43,7 +43,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@@ -68,7 +68,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -28,7 +28,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
 extern void flush_dcache_page(struct page *pg);
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
...
@@ -14,7 +14,7 @@ extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
@@ -31,14 +31,14 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 	} while (0)
...
@@ -50,21 +50,21 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
 #define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
 #define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
 #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
-#define flush_cache_page(vma,addr) BTFIXUP_CALL(flush_cache_page)(vma,addr)
+#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma, pg)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr);			\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr);			\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
 BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
...
@@ -11,7 +11,7 @@
 	do { if ((__mm) == current->mm) flushw_user(); } while(0)
 #define flush_cache_range(vma, start, end) \
 	flush_cache_mm((vma)->vm_mm)
-#define flush_cache_page(vma, page) \
+#define flush_cache_page(vma, page, pfn) \
 	flush_cache_mm((vma)->vm_mm)
 /*
@@ -38,15 +38,15 @@ extern void __flush_dcache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {						\
-		flush_cache_page(vma, vaddr);		\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);			\
 	} while (0)
 extern void flush_dcache_page(struct page *page);
...
@@ -25,7 +25,7 @@
 #define flush_cache_all()			((void)0)
 #define flush_cache_mm(mm)			((void)0)
 #define flush_cache_range(vma, start, end)	((void)0)
-#define flush_cache_page(vma, vmaddr)		((void)0)
+#define flush_cache_page(vma, vmaddr, pfn)	((void)0)
 #define flush_dcache_page(page)			((void)0)
 #define flush_dcache_mmap_lock(mapping)		((void)0)
 #define flush_dcache_mmap_unlock(mapping)	((void)0)
...
@@ -8,7 +8,7 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
...
@@ -30,7 +30,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pte_present(pte)) {
 		unsigned long pfn = pte_pfn(pte);
-		flush_cache_page(vma, addr);
+		flush_cache_page(vma, addr, pfn);
 		pte = ptep_clear_flush(vma, addr, ptep);
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
...
@@ -1250,7 +1250,6 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * new_page
 {
 	pte_t entry;
-	flush_cache_page(vma, address);
 	entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
 			      vma);
 	ptep_establish(vma, address, page_table, entry);
@@ -1302,7 +1301,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 		int reuse = can_share_swap_page(old_page);
 		unlock_page(old_page);
 		if (reuse) {
-			flush_cache_page(vma, address);
+			flush_cache_page(vma, address, pfn);
 			entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
 					      vma);
 			ptep_set_access_flags(vma, address, page_table, entry, 1);
@@ -1345,6 +1344,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 			++mm->rss;
 		else
 			page_remove_rmap(old_page);
+		flush_cache_page(vma, address, pfn);
 		break_cow(vma, new_page, address, page_table);
 		lru_cache_add_active(new_page);
 		page_add_anon_rmap(new_page, vma, address);
...
@@ -573,7 +573,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 	}
 	/* Nuke the page table entry. */
-	flush_cache_page(vma, address);
+	flush_cache_page(vma, address, page_to_pfn(page));
 	pteval = ptep_clear_flush(vma, address, pte);
 	/* Move the dirty bit to the physical page now the pte is gone. */
@@ -690,7 +690,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 			continue;
 		/* Nuke the page table entry. */
-		flush_cache_page(vma, address);
+		flush_cache_page(vma, address, pfn);
 		pteval = ptep_clear_flush(vma, address, pte);
 		/* If nonlinear, store the file page offset in the pte. */
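Across the mm/ call sites in this patch the ordering is uniform: obtain
the pfn (pte_pfn() when the PTE is in hand, page_to_pfn() when the
struct page is), flush the cache while the translation still exists,
then clear the PTE, which also shoots down the TLB entry.  Condensed
into a sketch (a hypothetical helper, built only from calls used
above):

	static pte_t nuke_pte_sketch(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
	{
		unsigned long pfn = pte_pfn(*ptep);

		/* Flush while the virtual->physical mapping is still live... */
		flush_cache_page(vma, addr, pfn);
		/* ...then atomically clear the PTE and flush the TLB. */
		return ptep_clear_flush(vma, addr, ptep);
	}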