Commit 39e688a9 authored by Paul Mundt

sh: Revert lazy dcache writeback changes.

These ended up causing too many problems on older parts,
so revert for now.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent c87a7111
...@@ -156,8 +156,6 @@ void r7780rp_insw(unsigned long port, void *dst, unsigned long count) ...@@ -156,8 +156,6 @@ void r7780rp_insw(unsigned long port, void *dst, unsigned long count)
while (count--) while (count--)
*buf++ = *p; *buf++ = *p;
flush_dcache_all();
} }
void r7780rp_insl(unsigned long port, void *dst, unsigned long count) void r7780rp_insl(unsigned long port, void *dst, unsigned long count)
...@@ -204,8 +202,6 @@ void r7780rp_outsw(unsigned long port, const void *src, unsigned long count) ...@@ -204,8 +202,6 @@ void r7780rp_outsw(unsigned long port, const void *src, unsigned long count)
while (count--) while (count--)
*p = *buf++; *p = *buf++;
flush_dcache_all();
} }
void r7780rp_outsl(unsigned long port, const void *src, unsigned long count) void r7780rp_outsl(unsigned long port, const void *src, unsigned long count)
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_CPU_SH3 #ifdef CONFIG_CPU_SH3
/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a /* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
...@@ -96,7 +95,6 @@ void generic_insw(unsigned long port, void *dst, unsigned long count) ...@@ -96,7 +95,6 @@ void generic_insw(unsigned long port, void *dst, unsigned long count)
while (count--) while (count--)
*buf++ = *port_addr; *buf++ = *port_addr;
flush_dcache_all();
dummy_read(); dummy_read();
} }
...@@ -171,7 +169,6 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count) ...@@ -171,7 +169,6 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count)
while (count--) while (count--)
*port_addr = *buf++; *port_addr = *buf++;
flush_dcache_all();
dummy_read(); dummy_read();
} }
......
...@@ -237,20 +237,10 @@ static inline void flush_cache_4096(unsigned long start, ...@@ -237,20 +237,10 @@ static inline void flush_cache_4096(unsigned long start,
/* /*
* Write back & invalidate the D-cache of the page. * Write back & invalidate the D-cache of the page.
* (To avoid "alias" issues) * (To avoid "alias" issues)
*
* This uses a lazy write-back on UP, which is explicitly
* disabled on SMP.
*/ */
void flush_dcache_page(struct page *page) void flush_dcache_page(struct page *page)
{ {
#ifndef CONFIG_SMP if (test_bit(PG_mapped, &page->flags)) {
struct address_space *mapping = page_mapping(page);
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
#endif
{
unsigned long phys = PHYSADDR(page_address(page)); unsigned long phys = PHYSADDR(page_address(page));
unsigned long addr = CACHE_OC_ADDRESS_ARRAY; unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
int i, n; int i, n;
......
...@@ -3,11 +3,11 @@ ...@@ -3,11 +3,11 @@
* *
* Copyright (C) 1999, 2000 Niibe Yutaka * Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2004 Alex Song * Copyright (C) 2004 Alex Song
* Copyright (C) 2006 Paul Mundt
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
*
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/mman.h> #include <linux/mman.h>
...@@ -51,6 +51,7 @@ static inline void cache_wback_all(void) ...@@ -51,6 +51,7 @@ static inline void cache_wback_all(void)
if ((data & v) == v) if ((data & v) == v)
ctrl_outl(data & ~v, addr); ctrl_outl(data & ~v, addr);
} }
addrstart += current_cpu_data.dcache.way_incr; addrstart += current_cpu_data.dcache.way_incr;
...@@ -127,11 +128,7 @@ static void __flush_dcache_page(unsigned long phys) ...@@ -127,11 +128,7 @@ static void __flush_dcache_page(unsigned long phys)
*/ */
void flush_dcache_page(struct page *page) void flush_dcache_page(struct page *page)
{ {
struct address_space *mapping = page_mapping(page); if (test_bit(PG_mapped, &page->flags))
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(PHYSADDR(page_address(page))); __flush_dcache_page(PHYSADDR(page_address(page)));
} }
......
...@@ -23,6 +23,7 @@ extern struct mutex p3map_mutex[]; ...@@ -23,6 +23,7 @@ extern struct mutex p3map_mutex[];
*/ */
void clear_user_page(void *to, unsigned long address, struct page *page) void clear_user_page(void *to, unsigned long address, struct page *page)
{ {
__set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
clear_page(to); clear_page(to);
else { else {
...@@ -58,6 +59,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page) ...@@ -58,6 +59,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
void copy_user_page(void *to, void *from, unsigned long address, void copy_user_page(void *to, void *from, unsigned long address,
struct page *page) struct page *page)
{ {
__set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
copy_page(to, from); copy_page(to, from);
else { else {
...@@ -82,3 +84,23 @@ void copy_user_page(void *to, void *from, unsigned long address, ...@@ -82,3 +84,23 @@ void copy_user_page(void *to, void *from, unsigned long address,
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
} }
} }
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 *
 * Reads and clears the PTE at @ptep, then, if the PTE was present and
 * maps a valid page that is no longer writably mapped by any
 * address_space, clears the page's PG_mapped hint so the cache-alias
 * flushing paths will treat the page as unmapped again.
 *
 * @mm:   address space the PTE belongs to
 * @addr: virtual address the PTE maps
 * @ptep: pointer to the PTE to read and clear
 *
 * Returns the PTE value as it was before being cleared.
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
pte_clear(mm, addr, ptep);
/* Only present PTEs can refer to a page carrying the PG_mapped hint. */
if (!pte_not_present(pte)) {
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
struct address_space *mapping = page_mapping(page);
/*
 * Anonymous page (no mapping) or no writable mappers
 * left: drop PG_mapped so the next mapping of this page
 * goes through the flush path again.
 */
if (!mapping || !mapping_writably_mapped(mapping))
__clear_bit(PG_mapped, &page->flags);
}
}
return pte;
}
...@@ -7,7 +7,9 @@ ...@@ -7,7 +7,9 @@
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
*
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/mman.h> #include <linux/mman.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -74,6 +76,7 @@ void clear_user_page(void *to, unsigned long address, struct page *pg) ...@@ -74,6 +76,7 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
{ {
struct page *page = virt_to_page(to); struct page *page = virt_to_page(to);
__set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
clear_page(to); clear_page(to);
__flush_wback_region(to, PAGE_SIZE); __flush_wback_region(to, PAGE_SIZE);
...@@ -92,11 +95,12 @@ void clear_user_page(void *to, unsigned long address, struct page *pg) ...@@ -92,11 +95,12 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
* @from: P1 address * @from: P1 address
* @address: U0 address to be mapped * @address: U0 address to be mapped
*/ */
void copy_user_page(void *to, void *from, unsigned long address, void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
struct page *pg)
{ {
struct page *page = virt_to_page(to); struct page *page = virt_to_page(to);
__set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
copy_page(to, from); copy_page(to, from);
__flush_wback_region(to, PAGE_SIZE); __flush_wback_region(to, PAGE_SIZE);
...@@ -108,3 +112,26 @@ void copy_user_page(void *to, void *from, unsigned long address, ...@@ -108,3 +112,26 @@ void copy_user_page(void *to, void *from, unsigned long address,
__flush_wback_region(to, PAGE_SIZE); __flush_wback_region(to, PAGE_SIZE);
} }
} }
/*
 * For SH7705, we have our own implementation for ptep_get_and_clear
 * Copied from pg-sh4.c
 *
 * Reads and clears the PTE at @ptep, then, if the PTE was present and
 * maps a valid page that is no longer writably mapped by any
 * address_space, clears the page's PG_mapped hint so the cache-alias
 * flushing paths will treat the page as unmapped again.
 *
 * @mm:   address space the PTE belongs to
 * @addr: virtual address the PTE maps
 * @ptep: pointer to the PTE to read and clear
 *
 * Returns the PTE value as it was before being cleared.
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
pte_clear(mm, addr, ptep);
/* Only present PTEs can refer to a page carrying the PG_mapped hint. */
if (!pte_not_present(pte)) {
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
struct address_space *mapping = page_mapping(page);
/*
 * Anonymous page (no mapping) or no writable mappers
 * left: drop PG_mapped so the next mapping of this page
 * goes through the flush path again.
 */
if (!mapping || !mapping_writably_mapped(mapping))
__clear_bit(PG_mapped, &page->flags);
}
}
return pte;
}
...@@ -2,17 +2,15 @@ ...@@ -2,17 +2,15 @@
* TLB flushing operations for SH with an MMU. * TLB flushing operations for SH with an MMU.
* *
* Copyright (C) 1999 Niibe Yutaka * Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 - 2006 Paul Mundt * Copyright (C) 2003 Paul Mundt
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
*/ */
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/io.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h>
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{ {
...@@ -140,54 +138,3 @@ void local_flush_tlb_all(void) ...@@ -140,54 +138,3 @@ void local_flush_tlb_all(void)
ctrl_barrier(); ctrl_barrier();
local_irq_restore(flags); local_irq_restore(flags);
} }
/*
 * update_mmu_cache - preload the TLB and sync lazy dcache state for a PTE
 * @vma:     VMA the fault occurred in (unused here)
 * @address: faulting virtual address
 * @pte:     PTE being established for @address
 *
 * Lazy-dcache variant (removed by this commit): if the page was marked
 * PG_dcache_dirty by a deferred flush_dcache_page(), write back its
 * cache lines now, then load PTEH/PTEA/PTEL and execute "ldtlb" to
 * install the entry, with interrupts disabled around the register
 * programming.
 */
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte)
{
unsigned long flags;
unsigned long pteval;
unsigned long vpn;
struct page *page;
unsigned long pfn = pte_pfn(pte);
struct address_space *mapping;
/* Nothing to do for PTEs that don't map a valid RAM page. */
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
mapping = page_mapping(page);
if (mapping) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
/* Deferred flush pending? Write the page back via its P1 alias. */
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty)
__flush_wback_region((void *)P1SEGADDR(phys),
PAGE_SIZE);
}
/* MMU register programming and ldtlb must not be interrupted. */
local_irq_save(flags);
/* Set PTEH register: VPN of the faulting address plus current ASID. */
vpn = (address & MMU_VPN_MASK) | get_asid();
ctrl_outl(vpn, MMU_PTEH);
pteval = pte_val(pte);
#ifdef CONFIG_CPU_HAS_PTEA
/* Set PTEA register */
/* TODO: make this look less hacky */
ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
#endif
/* Set PTEL register */
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
/* Force write-through caching when the kernel is configured for it. */
pteval |= _PAGE_WT;
#endif
/* conveniently, we want all the software flags to be 0 anyway */
ctrl_outl(pteval, MMU_PTEL);
/* Load the TLB */
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
local_irq_restore(flags);
}
...@@ -8,9 +8,69 @@ ...@@ -8,9 +8,69 @@
* *
* Released under the terms of the GNU GPL v2.0. * Released under the terms of the GNU GPL v2.0.
*/ */
#include <linux/io.h> #include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * update_mmu_cache - preload the SH-3 TLB for a freshly established PTE
 * @vma:     VMA the fault occurred in (may be NULL-checked for ptrace)
 * @address: faulting virtual address
 * @pte:     PTE being established for @address
 *
 * Restored PG_mapped variant: on SH7705 (32KB cache) the page is
 * written back via its P1 alias the first time it is mapped, and
 * PG_mapped is set to record that. PTEH/PTEL are then programmed and
 * "ldtlb" executed with interrupts disabled.
 */
void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
unsigned long flags;
unsigned long pteval;
unsigned long vpn;
/* Ptrace may call this routine. */
if (vma && current->active_mm != vma->vm_mm)
return;
#if defined(CONFIG_SH7705_CACHE_32KB)
{
struct page *page = pte_page(pte);
unsigned long pfn = pte_pfn(pte);
/* First mapping of this page: flush its P1 alias, mark it mapped. */
if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys),
PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
}
}
#endif
/* MMU register programming and ldtlb must not be interrupted. */
local_irq_save(flags);
/* Set PTEH register: VPN of the faulting address plus current ASID. */
vpn = (address & MMU_VPN_MASK) | get_asid();
ctrl_outl(vpn, MMU_PTEH);
pteval = pte_val(pte);
/* Set PTEL register */
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
/* conveniently, we want all the software flags to be 0 anyway */
ctrl_outl(pteval, MMU_PTEL);
/* Load the TLB */
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
local_irq_restore(flags);
}
void local_flush_tlb_one(unsigned long asid, unsigned long page) void local_flush_tlb_one(unsigned long asid, unsigned long page)
{ {
...@@ -34,3 +94,4 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) ...@@ -34,3 +94,4 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
for (i = 0; i < ways; i++) for (i = 0; i < ways; i++)
ctrl_outl(data, addr + (i << 8)); ctrl_outl(data, addr + (i << 8));
} }
...@@ -8,9 +8,74 @@ ...@@ -8,9 +8,74 @@
* *
* Released under the terms of the GNU GPL v2.0. * Released under the terms of the GNU GPL v2.0.
*/ */
#include <linux/io.h> #include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * update_mmu_cache - preload the SH-4 TLB for a freshly established PTE
 * @vma:     VMA the fault occurred in (may be NULL-checked for ptrace)
 * @address: faulting virtual address
 * @pte:     PTE being established for @address
 *
 * Restored PG_mapped variant: the page is written back via its P1 alias
 * the first time it is mapped and PG_mapped is set to record that.
 * PTEH/PTEA/PTEL are then programmed and "ldtlb" executed with
 * interrupts disabled.
 */
void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
unsigned long flags;
unsigned long pteval;
unsigned long vpn;
struct page *page;
unsigned long pfn;
/* Ptrace may call this routine. */
if (vma && current->active_mm != vma->vm_mm)
return;
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
/* First mapping of this page: flush its P1 alias, mark it mapped. */
if (!test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
}
}
/* MMU register programming and ldtlb must not be interrupted. */
local_irq_save(flags);
/* Set PTEH register: VPN of the faulting address plus current ASID. */
vpn = (address & MMU_VPN_MASK) | get_asid();
ctrl_outl(vpn, MMU_PTEH);
pteval = pte_val(pte);
/* Set PTEA register */
if (cpu_data->flags & CPU_HAS_PTEA)
/* TODO: make this look less hacky */
ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
/* Set PTEL register */
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#ifdef CONFIG_SH_WRITETHROUGH
/* Force write-through caching when the kernel is configured for it. */
pteval |= _PAGE_WT;
#endif
/* conveniently, we want all the software flags to be 0 anyway */
ctrl_outl(pteval, MMU_PTEL);
/* Load the TLB */
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
local_irq_restore(flags);
}
void local_flush_tlb_one(unsigned long asid, unsigned long page) void local_flush_tlb_one(unsigned long asid, unsigned long page)
{ {
...@@ -28,3 +93,4 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) ...@@ -28,3 +93,4 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
ctrl_outl(data, addr); ctrl_outl(data, addr);
back_to_P1(); back_to_P1();
} }
...@@ -30,8 +30,5 @@ extern void __flush_invalidate_region(void *start, int size); ...@@ -30,8 +30,5 @@ extern void __flush_invalidate_region(void *start, int size);
#define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA
/* Page flag for lazy dcache write-back for the aliasing UP caches */
#define PG_dcache_dirty PG_arch_1
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */ #endif /* __ASM_SH_CACHEFLUSH_H */
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
/* 32KB cache, 4kb PAGE sizes need to check bit 12 */ /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
#define CACHE_ALIAS 0x00001000 #define CACHE_ALIAS 0x00001000
#define PG_mapped PG_arch_1
void flush_cache_all(void); void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm); void flush_cache_mm(struct mm_struct *mm);
#define flush_cache_dup_mm(mm) flush_cache_mm(mm) #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
......
...@@ -39,4 +39,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, ...@@ -39,4 +39,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
/* Initialization of P3 area for copy_user_page */ /* Initialization of P3 area for copy_user_page */
void p3_cache_init(void); void p3_cache_init(void);
#define PG_mapped PG_arch_1
#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */ #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
...@@ -583,6 +583,11 @@ struct mm_struct; ...@@ -583,6 +583,11 @@ struct mm_struct;
extern unsigned int kobjsize(const void *objp); extern unsigned int kobjsize(const void *objp);
#endif /* !CONFIG_MMU */ #endif /* !CONFIG_MMU */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void); extern void paging_init(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment