Commit 26ff6c11 authored by Paul Mundt

sh: page table alloc cleanups and page fault optimizations.

Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.

The page fault handler is also optimized slightly: we no longer waste
cycles disabling IRQs around the ITLB flush, since we are already under
CLI protection from the initial exception handler.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 9359e757
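
For orientation, the pattern this cleanup adopts is the standard folded
page table walk. Below is a minimal sketch, not code from this commit:
the helper name lookup_kernel_pte is hypothetical, and it assumes the
2.6-era <asm-generic/pgtable-nopmd.h> folding that the diff switches to.
The pud/pmd steps compile away on two-level SH, but keep the source
compatible with four-level architectures:

	/*
	 * Hypothetical helper; mirrors the walk __do_page_fault() uses
	 * below. With folded levels, pud_offset()/pmd_offset() are
	 * effectively casts and cost nothing at runtime.
	 */
	static pte_t *lookup_kernel_pte(unsigned long address)
	{
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		pud = pud_offset(pgd, address);	/* no-op when folded */
		if (pud_none_or_clear_bad(pud))
			return NULL;

		pmd = pmd_offset(pud, address);	/* no-op when folded */
		if (pmd_none_or_clear_bad(pmd))
			return NULL;

		return pte_offset_kernel(pmd, address);
	}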
@@ -21,7 +21,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
...
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS)	+= pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
 obj-y			+= $(mmu-y)
...
@@ -9,6 +9,8 @@
  */
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
 #include <asm/io.h>

 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
...
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- * linux/arch/sh/mm/fault.c
  * Copyright (C) 1999  Niibe Yutaka
  * Copyright (C) 2003  Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>

 extern void die(const char *,struct pt_regs *,long);

@@ -187,14 +174,25 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 		goto no_context;
 	}

+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;

@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	kgdb_bus_err_hook();
 #endif

-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
 		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}

-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;

 	if (mm)
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	else
 		pte = pte_offset_kernel(pmd, address);

 	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-	    || (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
 		goto unlock;

 	if (writeaccess)

@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address&PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif

 	set_pte(pte, entry);

@@ -260,122 +257,3 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	pte_unmap_unlock(pte, ptl);

 	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 *	TF-bit for SH-3, TI-bit for SH-4.
-	 *	It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
@@ -80,6 +80,7 @@ void show_mem(void)
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 		return;
 	}

-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		if (pmd != pmd_offset(pud, 0)) {
+			pud_ERROR(*pud);
+			return;
+		}
+	}
+
+	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
...
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ *  Copyright (C) 1999  Niibe Yutaka
+ *  Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+			mm->context = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB of this process. */
+	/* Instead of invalidating each TLB, we get new MMU context. */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 *	It's same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
@@ -10,7 +10,6 @@
 #ifdef __KERNEL__
 #include <asm/cpu/cache.h>
-#include <asm/cpu/cacheflush.h>

 #define SH_CACHE_VALID		1
 #define SH_CACHE_UPDATED	2

@@ -49,12 +48,5 @@ struct cache_info {
	unsigned long flags;
 };
-
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
@@ -2,6 +2,7 @@
 #define __ASM_SH_CACHEFLUSH_H

 #ifdef __KERNEL__
+#include <linux/mm.h>
 #include <asm/cpu/cacheflush.h>

 /* Flush (write-back only) a region (smaller than a page) */
...
@@ -10,7 +10,7 @@
 #ifndef __ASM_CPU_SH3_CACHEFLUSH_H
 #define __ASM_CPU_SH3_CACHEFLUSH_H

 /*
  * Cache flushing:
  *
  *  - flush_cache_all() flushes entire cache

@@ -35,10 +35,6 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000

-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,

@@ -79,8 +75,6 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define p3_cache_init()			do { } while (0)
-
-#define HAVE_ARCH_UNMAPPED_AREA
 #endif

 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
...
@@ -16,30 +16,26 @@
  *  caching; in which case they're only semi-broken),
  *  so we need them.
  */
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn);
+void flush_dcache_page(struct page *pg);

 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)

-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);

 #define flush_icache_page(vma,pg)		do { } while (0)

 /* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);

 #define PG_mapped	PG_arch_1

@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 }
 #endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
...
@@ -41,7 +41,8 @@ extern void (*copy_page)(void *to, void *from);
 extern void clear_page_slow(void *to);
 extern void copy_page_slow(void *to, void *from);

-#if defined(CONFIG_SH7705_CACHE_32KB) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);

@@ -50,29 +51,20 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
 #elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-#elif defined(CONFIG_CPU_SH4)
-struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
 #endif

 /*
  * These are used to make use of C type-checking..
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;

 #define pte_val(x)	((x).pte)
-#define pmd_val(x)	((x).pmd)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)

 #define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
...
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
-
-#include <linux/threads.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#define pgd_quicklist ((unsigned long *)0)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist ((unsigned long *)0)
-#define pgtable_cache_size 0L

 #define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))

@@ -24,38 +15,24 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
-	pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
-
-	if (pgd)
-		memset(pgd, 0, pgd_size);
-
-	return pgd;
+	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }

 static inline void pgd_free(pgd_t *pgd)
 {
-	kfree(pgd);
+	free_page((unsigned long)pgd);
 }

 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
-	return pte;
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }

 static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
 {
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
-	return pte;
+	return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }

 static inline void pte_free_kernel(pte_t *pte)

@@ -75,14 +52,8 @@ static inline void pte_free(struct page *pte)
  * inside the pgd, so has no extra memory associated with it.
  */
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
 #define check_pgt_cache()		do { } while (0)
-
-#ifdef CONFIG_CPU_SH4
-#define PG_mapped	PG_arch_1
-#endif

 #endif /* __ASM_SH_PGALLOC_H */
-#ifndef __ASM_SH_PGTABLE_H
-#define __ASM_SH_PGTABLE_H
-
-#include <asm-generic/4level-fixup.h>
-
 /*
- * This file contains the functions and defines necessary to modify and
- * use the SuperH page table tree.
- *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 - 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of this
+ * archive for more details.
  */
+#ifndef __ASM_SH_PGTABLE_H
+#define __ASM_SH_PGTABLE_H

-#include <asm/pgtable-2level.h>
+#include <asm-generic/pgtable-nopmd.h>
+#include <asm/page.h>
+
+#define PTRS_PER_PGD		1024
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the SuperH page table tree.
+ */

 #ifndef __ASSEMBLY__
-#include <asm/processor.h>
 #include <asm/addrspace.h>
 #include <asm/fixmap.h>
-#include <linux/threads.h>

 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);

-/*
- * Basically we have the same two-level (which is the logical three level
- * Linux page table layout folded) page tables as the i386.
- */
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[1024];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 #endif /* !__ASSEMBLY__ */

-/* traditional two-level paging structure */
-#define PGDIR_SHIFT	22
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PTE	1024
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)

@@ -47,7 +47,6 @@ extern unsigned long empty_zero_page[1024];
 #define PTE_PHYS_MASK	0x1ffff000

-#ifndef __ASSEMBLY__
 /*
  * First 1MB map is used by fixed purpose.
  * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)

@@ -65,7 +64,7 @@ extern unsigned long empty_zero_page[1024];
 #define _PAGE_SZ1	0x080  /* SZ1-bit : Size of page (on SH-4) */
 #define _PAGE_PRESENT	0x100  /* V-bit   : page is valid */
 #define _PAGE_PROTNONE	0x200  /* software: if not present  */
 #define _PAGE_ACCESSED	0x400  /* software: page referenced */
 #define _PAGE_U0_SHARED	0x800  /* software: page is shared in user space */
 #define _PAGE_FILE	_PAGE_WT /* software: pagecache or swap? */

@@ -83,7 +82,6 @@ extern unsigned long empty_zero_page[1024];
 #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
 #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */

 /* Mask which drop software flags
  * We also drop WT bit since it is used for _PAGE_FILE
  * bit in this implementation.

@@ -115,6 +113,8 @@ extern unsigned long empty_zero_page[1024];
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)

+#ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_SHARED | _PAGE_FLAGS_HARD)

@@ -137,12 +137,13 @@ extern unsigned long empty_zero_page[1024];
 #define PAGE_KERNEL_PCC	__pgprot(0)
 #endif
+#endif /* __ASSEMBLY__ */

 /*
  * As i386 and MIPS, SuperH can't do page protection for execute, and
  * considers that the same as a read. Also, write permissions imply
  * read permissions. This is the closest we can get..
  */
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY

@@ -161,6 +162,26 @@ extern unsigned long empty_zero_page[1024];
 #define __S110	PAGE_SHARED
 #define __S111	PAGE_SHARED

+#ifndef __ASSEMBLY__
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

@@ -171,7 +192,7 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)

 /*
  * The following only work if pte_present() is true.

@@ -248,6 +269,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)

+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);

@@ -272,8 +298,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 typedef pte_t *pte_addr_t;

-#endif /* !__ASSEMBLY__ */
-
 #define kern_addr_valid(addr)	(1)

 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\

@@ -301,5 +325,7 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 #include <asm-generic/pgtable.h>

+#endif /* !__ASSEMBLY__ */
+
 #endif /* __ASM_SH_PAGE_H */