Commit a77eaeda authored by Russell King

Fix up auto merge problems.

parents bc9d1c71 5a8202f0
/*
* linux/arch/arm/mm/init.c
*
-* Copyright (C) 1995-2000 Russell King
+* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -46,7 +46,7 @@
#define TABLE_OFFSET 0
#endif
-#define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(void *))
+#define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(pte_t))
static unsigned long totalram_pages;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -319,7 +319,7 @@ static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int boot
* and can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-PTRS_PER_PGD * sizeof(void *));
+PTRS_PER_PGD * sizeof(pgd_t));
#endif
/*
* And don't forget to reserve the allocator bitmap,
......
/*
* linux/arch/arm/mm/mm-armv.c
*
-* Copyright (C) 1998-2000 Russell King
+* Copyright (C) 1998-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -82,9 +82,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
init_pgd = pgd_offset_k(0);
if (vectors_base() == 0) {
-init_pmd = pmd_offset(init_pgd, 0);
-init_pte = pte_offset(init_pmd, 0);
/*
* This lock is here just to satisfy pmd_alloc and pte_lock
*/
@@ -172,11 +169,14 @@ void free_pgd_slow(pgd_t *pgd)
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
-pmd_t pmd;
+pmd_t *pmdp, pmd;
-pmd_val(pmd) = phys | prot;
+pmdp = pmd_offset(pgd_offset_k(virt), virt);
+if (virt & (1 << PMD_SHIFT))
+pmdp++;
-set_pmd(pmd_offset(pgd_offset_k(virt), virt), pmd);
+pmd_val(pmd) = phys | prot;
+set_pmd(pmdp, pmd);
}
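A note on the pmdp++ step above: with the 32-bit layout each Linux page-directory slot now stands for two 1MB hardware section entries, and bit 20 of the virtual address picks the odd one. A small worked example of that selection, assuming PMD_SHIFT == 20:

/* e.g. virt = 0x00300000 (3MB): pmd_offset() lands on the even entry
 * of the pair covering 2MB-4MB; virt & (1 << 20) is non-zero, so the
 * pmdp++ selects the second (odd) 1MB hardware section entry. */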
/*
@@ -189,18 +189,19 @@ alloc_init_section(unsigned long virt, unsigned long phys, int prot)
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
-pmd_t *pmdp;
+pmd_t *pmdp, pmd;
pte_t *ptep;
pmdp = pmd_offset(pgd_offset_k(virt), virt);
if (pmd_none(*pmdp)) {
-pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-sizeof(pte_t));
+ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
+sizeof(pte_t));
ptep += PTRS_PER_PTE;
-set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
+pmd_val(pmd) = __pa(ptep) | PMD_TYPE_TABLE | PMD_DOMAIN(domain);
+set_pmd(pmdp, pmd);
+pmd_val(pmd) += 256 * sizeof(pte_t);
+set_pmd(pmdp + 1, pmd);
}
ptep = pte_offset_kernel(pmdp, virt);
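The constants in the bootmem branch above are worth a quick check, assuming 32-bit descriptors (sizeof(pte_t) == 4) and the doubled PTRS_PER_PTE of 512 from proc-armv/pgtable.h further down:

/* 2 * PTRS_PER_PTE * sizeof(pte_t) = 2 * 512 * 4 = 4096: one page holds
 * both hardware tables and both Linux shadow tables:
 *   +0     h/w table 0      +1024  h/w table 1
 *   +2048  Linux table 0    +3072  Linux table 1
 * ptep += PTRS_PER_PTE moves 512 entries (2048 bytes) up to the Linux
 * half, and pmd_val(pmd) += 256 * sizeof(pte_t) advances the second
 * pmd entry by one 256-entry hardware table (1024 bytes). */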
@@ -266,11 +267,11 @@ static void __init create_mapping(struct map_desc *md)
length -= PAGE_SIZE;
}
-while (length >= PGDIR_SIZE) {
+while (length >= (PGDIR_SIZE / 2)) {
alloc_init_section(virt, virt + off, prot_sect);
-virt += PGDIR_SIZE;
-length -= PGDIR_SIZE;
+virt += (PGDIR_SIZE / 2);
+length -= (PGDIR_SIZE / 2);
}
while (length >= PAGE_SIZE) {
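The halved step size follows directly from the new PGDIR_SHIFT; the arithmetic, assuming PGDIR_SHIFT == 21 on 32-bit CPUs:

/* PGDIR_SIZE     = 1 << 21 = 2MB  (one Linux pgd slot, two h/w sections)
 * PGDIR_SIZE / 2 = 1MB            (one hardware section)
 * alloc_init_section() fills one hardware section entry per call, so
 * the loop must advance 1MB, not 2MB, per iteration. */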
@@ -463,41 +464,3 @@ void __init create_memmap_holes(struct meminfo *mi)
for (node = 0; node < numnodes; node++)
free_unused_memmap_node(node, mi);
}
-/*
-* PTE table allocation cache.
-*
-* This is a move away from our custom 2K page allocator. We now use the
-* slab cache to keep track of these objects.
-*
-* With this, it is questionable as to whether the PGT cache gains us
-* anything. We may be better off dropping the PTE stuff from our PGT
-* cache implementation.
-*/
-kmem_cache_t *pte_cache;
-/*
-* The constructor gets called for each object within the cache when the
-* cache page is created. Note that if slab tries to misalign the blocks,
-* we BUG() loudly.
-*/
-static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
-{
-unsigned long block = (unsigned long)pte;
-if (block & 2047)
-BUG();
-memzero(pte, 2 * PTRS_PER_PTE * sizeof(pte_t));
-cpu_cache_clean_invalidate_range(block, block +
-PTRS_PER_PTE * sizeof(pte_t), 0);
-}
-void __init pgtable_cache_init(void)
-{
-pte_cache = kmem_cache_create("pte-cache",
-2 * PTRS_PER_PTE * sizeof(pte_t), 0, 0,
-pte_cache_ctor, NULL);
-if (!pte_cache)
-BUG();
-}
@@ -499,7 +499,9 @@ ENTRY(cpu_arm1020_set_pmd)
*/
.align 5
ENTRY(cpu_arm1020_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
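The same three-instruction prologue recurs in every processor's set_pte hunk in this commit. In rough C terms it does the following (a sketch only; the name and types are illustrative, and it assumes the new 4KB layout in which the Linux PTE copy lives in the upper 2KB half of the table page, 2048 bytes above the hardware copy):

static void set_pte_model(unsigned long *linux_ptep, unsigned long pte)
{
        /* tst r0, #2048 / streq r0, [r0, -r0]: if the incoming pointer
         * is not in the upper 2KB half, store to address r0 - r0 = 0
         * and take a fault, i.e. a hand-rolled BUG_ON */
        if (!((unsigned long)linux_ptep & 2048))
                *(volatile unsigned long *)0 = 0;

        *linux_ptep = pte;      /* str r1, [r0], #-2048: Linux copy */
        /* post-indexing leaves r0 pointing 2048 bytes down, at the
         * hardware copy, which the following instructions rewrite */
}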
@@ -274,7 +274,9 @@ ENTRY(cpu_arm7_set_pmd)
.align 5
ENTRY(cpu_arm6_set_pte)
ENTRY(cpu_arm7_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
@@ -136,7 +136,9 @@ ENTRY(cpu_arm720_set_pmd)
*/
.align 5
ENTRY(cpu_arm720_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
@@ -420,7 +420,9 @@ ENTRY(cpu_arm920_set_pmd)
*/
.align 5
ENTRY(cpu_arm920_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
@@ -421,7 +421,9 @@ ENTRY(cpu_arm922_set_pmd)
*/
.align 5
ENTRY(cpu_arm922_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
@@ -443,7 +443,9 @@ ENTRY(cpu_arm926_set_pmd)
*/
.align 5
ENTRY(cpu_arm926_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
/*
* linux/arch/arm/mm/proc-sa110.S
*
-* Copyright (C) 1997-2000 Russell King
+* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -468,7 +468,9 @@ ENTRY(cpu_sa1100_set_pmd)
.align 5
ENTRY(cpu_sa110_set_pte)
ENTRY(cpu_sa1100_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
@@ -602,7 +602,9 @@ ENTRY(cpu_xscale_set_pmd)
*/
.align 5
ENTRY(cpu_xscale_set_pte)
-str r1, [r0], #-1024 @ linux version
+tst r0, #2048
+streq r0, [r0, -r0] @ BUG_ON
+str r1, [r0], #-2048 @ linux version
bic r2, r1, #0xff0
orr r2, r2, #PTE_TYPE_EXT @ extended page
......
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
-#include <asm/proc/page.h>
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#include <linux/config.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -58,17 +55,14 @@ extern void copy_page(void *to, void *from);
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
-#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#else
@@ -77,25 +71,29 @@ typedef struct { unsigned long pgprot; } pgprot_t;
*/
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
-typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
-#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
-#define __pgd(x) (x)
#define __pgprot(x) (x)
#endif
#endif /* STRICT_MM_TYPECHECKS */
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#include <asm/proc/page.h>
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#ifdef CONFIG_DEBUG_BUGVERBOSE
@@ -129,7 +127,6 @@ static inline int get_order(unsigned long size)
#endif /* !__ASSEMBLY__ */
-#include <linux/config.h>
#include <asm/arch/memory.h>
#define __pa(x) __virt_to_phys((unsigned long)(x))
@@ -144,6 +141,6 @@ static inline int get_order(unsigned long size)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#endif
+#endif /* __KERNEL__ */
#endif
/*
* linux/include/asm-arm/pgtable.h
*
-* Copyright (C) 2000-2001 Russell King
+* Copyright (C) 2000-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,7 +19,11 @@
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
#define PMD_SHIFT 20
+#ifdef CONFIG_CPU_32
+#define PGDIR_SHIFT 21
+#else
#define PGDIR_SHIFT 20
+#endif
#define LIBRARY_TEXT_START 0x0c000000
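A worked view of what the conditional above changes (the index expression is illustrative, mirroring the usual addr >> PGDIR_SHIFT lookup):

/* 32-bit (CONFIG_CPU_32): slot = addr >> 21, 2048 slots of 2MB each
 * 26-bit:                 slot = addr >> 20, 1MB granularity as before
 * Only the 32-bit CPUs adopt the paired-entry scheme described in
 * proc-armv/pgtable.h further down. */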
@@ -93,7 +97,6 @@ extern struct page *empty_zero_page;
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
-#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
/*
* Permanent address of a page. We never have highmem, so this is trivial.
@@ -106,18 +109,10 @@ extern struct page *empty_zero_page;
*/
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
-pte_t pte;
-pte_val(pte) = physpage | pgprot_val(pgprot);
-return pte;
+return __pte(physpage | pgprot_val(pgprot));
}
-#define mk_pte(page,pgprot) \
-({ \
-pte_t __pte; \
-pte_val(__pte) = __pa(page_address(page)) + \
-pgprot_val(pgprot); \
-__pte; \
-})
+#define mk_pte(page,pgprot) mk_pte_phys(__pa(page_address(page)), pgprot)
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
@@ -127,7 +122,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
-#define pgd_clear(pgdp)
+#define pgd_clear(pgdp) do { } while (0)
#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) mk_pte(page, __pgprot(0))
@@ -147,15 +142,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
/* Find an entry in the third-level page table.. */
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pmd_page(dir) ((struct page *)__pmd_page(dir))
-#define __pte_offset(dir, addr) ((pte_t *)__pmd_page(*(dir)) + __pte_index(addr))
-#define pte_offset_kernel __pte_offset
-#define pte_offset_map __pte_offset
-#define pte_offset_map_nested __pte_offset
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#include <asm/proc/pgtable.h>
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -182,8 +168,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#include <asm-generic/pgtable.h>
-extern void pgtable_cache_init(void);
/*
* remap a physical address `phys' of size `size' with page protection `prot'
* into virtual address `from'
......
/*
* linux/include/asm-arm/proc-armo/page.h
*
-* Copyright (C) 1995, 1996 Russell King
+* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,4 +21,20 @@
#define EXEC_PAGESIZE 32768
+#ifndef __ASSEMBLY__
+#ifdef STRICT_MM_TYPECHECKS
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#else
+typedef unsigned long pgd_t;
+#define pgd_val(x) (x)
+#endif
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PAGE_H */
/*
* linux/include/asm-arm/proc-armo/pgalloc.h
*
-* Copyright (C) 2001 Russell King
+* Copyright (C) 2001-2002 Russell King
*
* Page table allocation/freeing primitives for 26-bit ARM processors.
*/
/* unfortunately, this includes linux/mm.h and the rest of the universe. */
#include <linux/slab.h>
extern kmem_cache_t *pte_cache;
/*
* Allocate one PTE table.
*
* Note that we keep the processor copy of the PTE entries separate
* from the Linux copy. The processor copies are offset by -PTRS_PER_PTE
* words from the Linux copy.
*/
-static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
return kmem_cache_alloc(pte_cache, GFP_KERNEL);
}
/*
* Free one PTE table.
*/
-static inline void pte_free_slow(pte_t *pte)
+static inline void pte_free_kernel(pte_t *pte)
{
if (pte)
kmem_cache_free(pte_cache, pte);
@@ -39,9 +29,16 @@ static inline void pte_free_slow(pte_t *pte)
* If 'mm' is the init task's mm, then we are doing a vmalloc, and we
* need to set stuff up correctly for it.
*/
-#define pmd_populate(mm,pmdp,pte) \
-do { \
-set_pmd(pmdp, __mk_pmd(pte, _PAGE_TABLE)); \
-} while (0)
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+set_pmd(pmdp, __mk_pmd(ptep, _PAGE_TABLE));
+}
+/*
+ * We use the old 2.5.5-rmk1 hack for this.
+ * This is not truly correct, but should be functional.
+ */
+#define pte_alloc_one(mm,addr) ((struct page *)pte_alloc_one_kernel(mm,addr))
+#define pte_free(pte) pte_free_kernel((pte_t *)pte)
+#define pmd_populate(mm,pmdp,ptep) pmd_populate_kernel(mm,pmdp,(pte_t *)ptep)
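The "2.5.5-rmk1 hack" works because the 26-bit port never puts PTE tables in highmem, so the struct page pointer that generic mm code expects and the kernel-virtual pte_t pointer can be punned into one another. A sketch of how a generic caller flows through these wrappers (hypothetical call site, not from the patch):

static void example_populate(struct mm_struct *mm, pmd_t *pmdp,
                             unsigned long addr)
{
        struct page *new = pte_alloc_one(mm, addr); /* really a pte_t * */
        if (new)
                pmd_populate(mm, pmdp, new);  /* cast back inside the macro */
}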
/*
* linux/include/asm-arm/proc-armo/pgtable.h
*
-* Copyright (C) 1995-2001 Russell King
+* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +32,7 @@
#define pmd_bad(pmd) ((pmd_val(pmd) & 0xfc000002))
#define set_pmd(pmdp,pmd) ((*(pmdp)) = (pmd))
+#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
@@ -48,6 +49,12 @@ static inline unsigned long pmd_page(pmd_t pmd)
return __phys_to_virt(pmd_val(pmd) & ~_PAGE_TABLE);
}
+#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define _PAGE_PRESENT 0x01
@@ -89,11 +96,11 @@ static inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_NOT_USE
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) &= ~_PAGE_CLEAN; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) &= ~_PAGE_OLD; return pte; }
-#define pte_alloc_kernel pte_alloc
/*
* We don't store cache state bits in the page table here.
*/
#define pgprot_noncached(prot) (prot)
+extern void pgtable_cache_init(void);
#endif /* __ASM_PROC_PGTABLE_H */
/*
* linux/include/asm-arm/proc-armv/page.h
*
-* Copyright (C) 1995, 1996 Russell King
+* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,4 +15,23 @@
#define EXEC_PAGESIZE 4096
+#ifndef __ASSEMBLY__
+#ifdef STRICT_MM_TYPECHECKS
+typedef struct {
+unsigned long pgd0;
+unsigned long pgd1;
+} pgd_t;
+#define pgd_val(x) ((x).pgd0)
+#else
+typedef unsigned long pgd_t[2];
+#define pgd_val(x) ((x)[0])
+#endif
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PAGE_H */
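This two-word pgd_t is what makes the sizeof() changes in arch/arm/mm/init.c at the top of this commit come out right; checking the arithmetic (32-bit descriptors assumed):

/* sizeof(pgd_t) == 8: one Linux pgd slot = two 32-bit hardware entries,
 * so PTRS_PER_PGD * sizeof(pgd_t) = 2048 * 8 = 16384 bytes, exactly the
 * 16KB hardware translation table reserved for swapper_pg_dir. */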
/*
* linux/include/asm-arm/proc-armv/pgalloc.h
*
-* Copyright (C) 2001 Russell King
+* Copyright (C) 2001-2002 Russell King
*
* Page table allocation/freeing primitives for 32-bit ARM processors.
*/
-/* unfortunately, this includes linux/mm.h and the rest of the universe. */
-#include <linux/slab.h>
-extern kmem_cache_t *pte_cache;
+#include "pgtable.h"
/*
* Allocate one PTE table.
*
-* Note that we keep the processor copy of the PTE entries separate
-* from the Linux copy. The processor copies are offset by -PTRS_PER_PTE
-* words from the Linux copy.
+* This actually allocates two hardware PTE tables, but we wrap this up
+* into one table thus:
+*
+* +------------+
+* | h/w pt 0 |
+* +------------+
+* | h/w pt 1 |
+* +------------+
+* | Linux pt 0 |
+* +------------+
+* | Linux pt 1 |
+* +------------+
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
+int count = 0;
pte_t *pte;
-pte = kmem_cache_alloc(pte_cache, GFP_KERNEL);
-if (pte)
+do {
+pte = (pte_t *)__get_free_page(GFP_KERNEL);
+if (!pte) {
+current->state = TASK_UNINTERRUPTIBLE;
+schedule_timeout(HZ);
+}
+} while (!pte && (count++ < 10));
+if (pte) {
+clear_page(pte);
+clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
pte += PTRS_PER_PTE;
+}
return pte;
}
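The pointer arithmetic in pte_alloc_one_kernel() ties back to the diagram above (assuming sizeof(pte_t) == 4 and PTRS_PER_PTE == 512):

/* one 4KB page as carved up by this allocator:
 *   page + 0     h/w pt 0      page + 1024  h/w pt 1
 *   page + 2048  Linux pt 0    page + 3072  Linux pt 1
 * pte += PTRS_PER_PTE returns page + 2048, so every Linux PTE pointer
 * has bit 11 set: exactly the invariant the assembler set_pte prologue
 * checks with tst r0, #2048. */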
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
-pte_t *pte;
+struct page *pte;
+int count = 0;
-pte = kmem_cache_alloc(pte_cache, GFP_KERNEL);
-if (pte)
-pte += PTRS_PER_PTE;
-return (struct page *)pte;
+do {
+pte = alloc_pages(GFP_KERNEL, 0);
+if (!pte) {
+current->state = TASK_UNINTERRUPTIBLE;
+schedule_timeout(HZ);
+}
+} while (!pte && (count++ < 10));
+if (pte) {
+void *page = page_address(pte);
+clear_page(page);
+clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+}
+return pte;
}
/*
@@ -47,34 +76,49 @@ static inline void pte_free_kernel(pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
-kmem_cache_free(pte_cache, pte);
+free_page((unsigned long)pte);
}
}
static inline void pte_free(struct page *pte)
{
-pte_t *_pte = (pte_t *)pte;
-if (pte) {
-_pte -= PTRS_PER_PTE;
-kmem_cache_free(pte_cache, _pte);
-}
+__free_page(pte);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
-* If 'mm' is the init tasks mm, then we are doing a vmalloc, and we
-* need to set stuff up correctly for it.
+* Ensure that we always set both PMD entries.
*/
-#define pmd_populate_kernel(mm,pmdp,pte) \
-do { \
-BUG_ON(mm != &init_mm); \
-set_pmd(pmdp, __mk_pmd(pte, _PAGE_KERNEL_TABLE));\
-} while (0)
-#define pmd_populate(mm,pmdp,pte) \
-do { \
-BUG_ON(mm == &init_mm); \
-set_pmd(pmdp, __mk_pmd(pte, _PAGE_USER_TABLE)); \
-} while (0)
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+unsigned long pte_ptr = (unsigned long)ptep;
+pmd_t pmd;
+BUG_ON(mm != &init_mm);
+/*
+ * The pmd must be loaded with the physical
+ * address of the PTE table
+ */
+pte_ptr -= PTRS_PER_PTE * sizeof(void *);
+pmd_val(pmd) = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
+set_pmd(pmdp, pmd);
+pmd_val(pmd) += 256 * sizeof(pte_t);
+set_pmd(pmdp + 1, pmd);
+}
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
+{
+pmd_t pmd;
+BUG_ON(mm == &init_mm);
+pmd_val(pmd) = __pa(page_address(ptep)) | _PAGE_USER_TABLE;
+set_pmd(pmdp, pmd);
+pmd_val(pmd) += 256 * sizeof(pte_t);
+set_pmd(pmdp + 1, pmd);
+}
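The pmd_val(pmd) += 256 * sizeof(pte_t) step in both functions encodes the pairing rule; the offset works out as follows (32-bit descriptors assumed):

/* 256 * sizeof(pte_t) = 1024 bytes = one 256-entry hardware table, so
 * pmdp[0] points at page + 0 and pmdp[1] at page + 1024, matching the
 * h/w pt 0 / h/w pt 1 halves in the layout diagram above. */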
/*
* linux/include/asm-arm/proc-armv/pgtable.h
*
-* Copyright (C) 1995-2001 Russell King
+* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,12 +16,17 @@
#define __ASM_PROC_PGTABLE_H
/*
-* entries per page directory level: they are two-level, so
-* we don't really have any PMD directory.
+* We pull a couple of tricks here:
+* 1. We wrap the PMD into the PGD.
+* 2. We lie about the size of the PTE and PGD.
+* Even though we have 256 PTE entries and 4096 PGD entries, we tell
+* Linux that we actually have 512 PTE entries and 2048 PGD entries.
+* Each "Linux" PGD entry is made up of two hardware PGD entries, and
+* each PTE table is actually two hardware PTE tables.
*/
-#define PTRS_PER_PTE 256
+#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD 4096
+#define PTRS_PER_PGD 2048
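The numbers in that comment can be cross-checked (4KB pages, 32-bit descriptors):

/* hardware view: 4096 L1 entries x 1MB = 4GB; 256-entry PTE tables
 *                cover 1MB each
 * Linux view:    2048 pgd entries x 2MB = 4GB; 512-entry PTE tables
 *                cover 2MB each
 * so one Linux pgd slot = two hardware L1 entries, and one Linux PTE
 * table = two hardware PTE tables back to back. */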
/*
* Hardware page table definitions.
@@ -109,33 +114,30 @@
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) cpu_set_pmd(pmdp, pmd)
-static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
+static inline void pmd_clear(pmd_t *pmdp)
{
-unsigned long pte_ptr = (unsigned long)ptep;
-pmd_t pmd;
-pte_ptr -= PTRS_PER_PTE * sizeof(void *);
-/*
-* The pmd must be loaded with the physical
-* address of the PTE table
-*/
-pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;
-return pmd;
+set_pmd(pmdp, __pmd(0));
+set_pmd(pmdp + 1, __pmd(0));
}
-static inline unsigned long __pmd_page(pmd_t pmd)
+static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
unsigned long ptr;
ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
ptr += PTRS_PER_PTE * sizeof(void *);
-return __phys_to_virt(ptr);
+return __va(ptr);
}
+#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
+#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep,pte)
/*
@@ -183,6 +185,8 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
+#define pgtable_cache_init() do { } while (0)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PGTABLE_H */