Commit 361739c8 authored by Geert Uytterhoeven

Merge branch 'pgtable-layout-rewrite' into for-v5.7

Peter Zijlstra <peterz@infradead.org> wrote:

   "In order to facilitate Will's READ_ONCE() patches, we need to fix
    m68k/motorola to not have a giant pmd_t. These patches do so and are
    tested using ARAnyM/68040."
parents 98d54f81 de9e354e
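For context: the "giant pmd_t" referred to above is the Motorola 020/030 convention of packing a group of 16 table descriptors into a single pmd_t, which cannot be loaded in one access and therefore defeats READ_ONCE(). A sketch of the type change (illustrative names only; both types are really called pmd_t, see the pmd_t typedef hunk further down in this diff):

/* before: one pmd_t spans 16 table descriptors (64 bytes) */
typedef struct { unsigned long pmd[16]; } old_pmd_t;

/* after: one pmd_t is a single 32-bit table descriptor, same size as a pgd_t */
typedef struct { unsigned long pmd; } new_pmd_t;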
@@ -28,21 +28,22 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
return (pmd_t *) pgd;
}
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
(unsigned long)(page_address(page)))
#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
#define pmd_populate_kernel pmd_populate
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
{
struct page *page = virt_to_page(pgtable);
pgtable_pte_page_dtor(page);
__free_page(page);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_DMA, 0);
pte_t *pte;
@@ -54,20 +55,16 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
return NULL;
}
pte = kmap(page);
if (pte) {
clear_page(pte);
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
nocache_page(pte);
}
kunmap(page);
pte = page_address(page);
clear_page(pte);
return page;
return pte;
}
static inline void pte_free(struct mm_struct *mm, struct page *page)
static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
{
struct page *page = virt_to_page(pgtable);
pgtable_pte_page_dtor(page);
__free_page(page);
}
@@ -90,7 +87,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
if (!new_pgd)
return NULL;
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
return new_pgd;
}
@@ -5,93 +5,71 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
extern void mmu_page_ctor(void *page);
extern void mmu_page_dtor(void *page);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
enum m68k_table_types {
TABLE_PGD = 0,
TABLE_PMD = 0, /* same size as PGD */
TABLE_PTE = 1,
};
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (pte) {
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
nocache_page(pte);
}
extern void init_pointer_table(void *table, int type);
extern void *get_pointer_table(int type);
extern int free_pointer_table(void *table, int type);
return pte;
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return get_pointer_table(TABLE_PTE);
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
cache_page(pte);
free_page((unsigned long) pte);
free_pointer_table(pte, TABLE_PTE);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
pte_t *pte;
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
pte = kmap(page);
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
nocache_page(pte);
kunmap(page);
return page;
return get_pointer_table(TABLE_PTE);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t page)
static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
{
pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
free_pointer_table(pgtable, TABLE_PTE);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
{
pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
free_pointer_table(pgtable, TABLE_PTE);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
return get_pointer_table();
return get_pointer_table(TABLE_PMD);
}
static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
return free_pointer_table(pmd);
return free_pointer_table(pmd, TABLE_PMD);
}
static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
return free_pointer_table(pmd);
return free_pointer_table(pmd, TABLE_PMD);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
pmd_free(mm, (pmd_t *)pgd);
free_pointer_table(pgd, TABLE_PGD);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
return (pgd_t *)get_pointer_table();
return get_pointer_table(TABLE_PGD);
}
@@ -102,9 +80,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pmd_set(pmd, page_address(page));
pmd_set(pmd, page);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
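With this header, every table level, including user PTE tables, now comes from the shared pointer-table allocator instead of a full page per PTE table, and the uncached-page handling moves into the mmu_page_ctor()/mmu_page_dtor() helpers added later in this series. Space-wise nothing is lost; a stand-alone check of that arithmetic (illustrative user-space code, not part of the patch):

#include <assert.h>

int main(void)
{
	unsigned long page = 4096;

	/* old: a PTE table was a whole uncached page of 1024 entries, mapping 4 MiB */
	unsigned long old_mapped_per_table_page = 1024 * page;

	/* new: 16 tables of 64 entries share one uncached page; each table
	 * maps 256 KiB, so a page of table memory still maps 4 MiB */
	unsigned long new_mapped_per_table_page = 16 * (64 * page);

	assert(old_mapped_per_table_page == new_mapped_per_table_page);
	return 0;
}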
@@ -23,7 +23,18 @@
#define _DESCTYPE_MASK 0x003
#define _CACHEMASK040 (~0x060)
#define _TABLE_MASK (0xfffffe00)
/*
* Currently set to the minimum alignment of table pointers (256 bytes).
* The hardware only uses the low 4 bits for state:
*
* 3 - Used
* 2 - Write Protected
* 0,1 - Descriptor Type
*
* and has the rest of the bits reserved.
*/
#define _TABLE_MASK (0xffffff00)
#define _PAGE_TABLE (_PAGE_SHORT)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
@@ -108,13 +119,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
unsigned long *ptr = pmdp->pmd;
short i = 16;
while (--i >= 0) {
*ptr++ = ptbl;
ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
}
pmd_val(*pmdp) = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
}
static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
@@ -138,13 +143,14 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pmd_present(pmd) (pmd_val(pmd) & _PAGE_TABLE)
#define pmd_clear(pmdp) ({ \
unsigned long *__ptr = pmdp->pmd; \
short __i = 16; \
while (--__i >= 0) \
*__ptr++ = 0; \
})
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pmd_clear(pmdp) ({ pmd_val(*pmdp) = 0; })
/*
* m68k does not have huge pages (020/030 actually could), but generic code
* expects pmd_page() to exists, only to then DCE it all. Provide a dummy to
* make the compiler happy.
*/
#define pmd_page(pmd) NULL
#define pud_none(pud) (!pud_val(pud))
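The widened _TABLE_MASK follows directly from the smaller tables: the smallest table is now a 64-entry PTE table, so a table pointer is only guaranteed 256-byte alignment and the address part of a descriptor is bits 31..8. A throwaway user-space check of that arithmetic (not kernel code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int smallest_table = 64 * 4;             /* 64 PTEs * 4 bytes = 256 */
	unsigned int table_mask = ~(smallest_table - 1);  /* keep bits 31..8         */

	assert(table_mask == 0xffffff00u);
	printf("_TABLE_MASK = %#x\n", table_mask);
	return 0;
}

pmd_set() shrinks accordingly: instead of filling 16 stepped descriptors it stores a single pointer-table address, matching the single-word pmd_t.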
@@ -22,15 +22,25 @@
* These are used to make use of C type-checking..
*/
#if !defined(CONFIG_MMU) || CONFIG_PGTABLE_LEVELS == 3
typedef struct { unsigned long pmd[16]; } pmd_t;
#define pmd_val(x) ((&x)->pmd[0])
#define __pmd(x) ((pmd_t) { { (x) }, })
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((&x)->pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#if defined(CONFIG_SUN3)
/*
* Sun3 still uses the asm-generic/pgalloc.h code and thus needs this
* definition. It would be possible to unify Sun3 and ColdFire pgalloc and have
* all of m68k use the same type.
*/
typedef struct page *pgtable_t;
#else
typedef pte_t *pgtable_t;
#endif
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
@@ -36,7 +36,7 @@
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT 22
#define PMD_SHIFT 18
#endif
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -67,8 +67,8 @@
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#else
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 8
#define PTRS_PER_PTE 64
#define PTRS_PER_PMD 128
#define PTRS_PER_PGD 128
#endif
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
@@ -76,8 +76,8 @@
/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
#define KMAP_START 0x0DC00000
#define KMAP_END 0x0E000000
#define KMAP_START 0x0dc00000
#define KMAP_END 0x0e000000
#elif defined(CONFIG_COLDFIRE)
#define KMAP_START 0xe0000000
#define KMAP_END 0xf0000000
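The new PTRS_PER_* values still cover the whole 32-bit address space. A quick stand-alone check of the split (PGDIR_SHIFT is not touched by this hunk and is assumed to remain 25):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* 128 PGD entries x 128 PMD entries x 64 PTE entries x 4 KiB pages */
	unsigned int page_shift  = 12;
	unsigned int pmd_shift   = page_shift + 6;   /* 64 PTEs  -> 256 KiB per PTE table */
	unsigned int pgdir_shift = pmd_shift + 7;    /* 128 PMDs -> 32 MiB per PMD table  */

	assert(pmd_shift == 18);          /* matches the new PMD_SHIFT     */
	assert(pgdir_shift == 25);        /* assumed unchanged PGDIR_SHIFT */
	assert(pgdir_shift + 7 == 32);    /* 128 PGD entries cover 4 GiB   */

	printf("PMD_SIZE = %u KiB, PGDIR_SIZE = %u MiB\n",
	       (1u << pmd_shift) >> 10, (1u << pgdir_shift) >> 20);
	return 0;
}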
@@ -40,11 +40,6 @@
void *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
extern void init_pointer_table(unsigned long ptable);
extern pmd_t *zero_pgtable;
#endif
#ifdef CONFIG_MMU
pg_data_t pg_data_map[MAX_NUMNODES];
@@ -125,20 +120,31 @@ void free_initmem(void)
static inline void init_pointer_tables(void)
{
#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
int i;
int i, j;
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
init_pointer_table(kernel_pg_dir, TABLE_PGD);
for (i = 0; i < PTRS_PER_PGD; i++) {
pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
pud_t *pud = (pud_t *)&kernel_pg_dir[i];
pmd_t *pmd_dir;
if (pud_present(*pud))
init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
}
if (!pud_present(*pud))
continue;
pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
init_pointer_table(pmd_dir, TABLE_PMD);
/* insert also pointer table that we used to unmap the zero page */
if (zero_pgtable)
init_pointer_table((unsigned long)zero_pgtable);
for (j = 0; j < PTRS_PER_PMD; j++) {
pmd_t *pmd = &pmd_dir[j];
pte_t *pte_dir;
if (!pmd_present(*pmd))
continue;
pte_dir = (pte_t *)__pmd_page(*pmd);
init_pointer_table(pte_dir, TABLE_PTE);
}
}
#endif
}
@@ -24,8 +24,6 @@
#undef DEBUG
#define PTRTREESIZE (256*1024)
/*
* For 040/060 we can use the virtual memory area like other architectures,
* but for 020/030 we want to use early termination page descriptors and we
@@ -50,7 +48,7 @@ static inline void free_io_area(void *addr)
#else
#define IO_SIZE (256*1024)
#define IO_SIZE PMD_SIZE
static struct vm_struct *iolist;
@@ -81,14 +79,13 @@ static void __free_io_area(void *addr, unsigned long size)
#if CONFIG_PGTABLE_LEVELS == 3
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;
if (pmd_type == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
pmd_clear(pmd_dir);
virtaddr += PMD_SIZE;
size -= PMD_SIZE;
} else if (pmd_type == 0)
continue;
}
@@ -249,7 +246,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
while ((long)size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
if (!(virtaddr & (PMD_SIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
@@ -263,10 +260,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
#if CONFIG_PGTABLE_LEVELS == 3
if (CPU_IS_020_OR_030) {
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
pmd_val(*pmd_dir) = physaddr;
physaddr += PMD_SIZE;
virtaddr += PMD_SIZE;
size -= PMD_SIZE;
} else
#endif
{
@@ -367,13 +364,12 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
#if CONFIG_PGTABLE_LEVELS == 3
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
unsigned long pmd = pmd_val(*pmd_dir);
if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
_CACHEMASK040) | cmode;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
virtaddr += PMD_SIZE;
size -= PMD_SIZE;
continue;
}
}
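The ioremap/cachemode hunks above can step by PMD_SIZE and use pmd_val()/pmd_clear() because one pmd_t now covers exactly the 256 KiB that one slot of the old 16-entry pmd group did. A minimal check of that equivalence (illustrative only):

#include <assert.h>

int main(void)
{
	unsigned long ptrtreesize = 256 * 1024;   /* old hard-coded step           */
	unsigned long pmd_size    = 1UL << 18;    /* new PMD_SIZE (PMD_SHIFT = 18) */

	/* one pmd entry covers what one of the 16 old slots covered, so the
	 * ioremap/free/cachemode loops no longer need the "& 15" indexing */
	assert(pmd_size == ptrtreesize);
	return 0;
}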
@@ -22,109 +22,6 @@
#include <asm/machdep.h>
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
struct page instead of separately kmalloced struct. Stolen from
arch/sparc/mm/srmmu.c ... */
typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
void __init init_pointer_table(unsigned long ptable)
{
ptable_desc *dp;
unsigned long page = ptable & PAGE_MASK;
unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
dp = PD_PTABLE(page);
if (!(PD_MARKBITS(dp) & mask)) {
PD_MARKBITS(dp) = 0xff;
list_add(dp, &ptable_list);
}
PD_MARKBITS(dp) &= ~mask;
pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
/* unreserve the page so it's possible to free that page */
__ClearPageReserved(PD_PAGE(dp));
init_page_count(PD_PAGE(dp));
return;
}
pmd_t *get_pointer_table (void)
{
ptable_desc *dp = ptable_list.next;
unsigned char mask = PD_MARKBITS (dp);
unsigned char tmp;
unsigned int off;
/*
* For a pointer table for a user process address space, a
* table is taken from a page allocated for the purpose. Each
* page can hold 8 pointer tables. The page is remapped in
* virtual address space to be noncacheable.
*/
if (mask == 0) {
void *page;
ptable_desc *new;
if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
return NULL;
flush_tlb_kernel_page(page);
nocache_page(page);
new = PD_PTABLE(page);
PD_MARKBITS(new) = 0xfe;
list_add_tail(new, dp);
return (pmd_t *)page;
}
for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
;
PD_MARKBITS(dp) = mask & ~tmp;
if (!PD_MARKBITS(dp)) {
/* move to end of list */
list_move_tail(dp, &ptable_list);
}
return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
int free_pointer_table (pmd_t *ptable)
{
ptable_desc *dp;
unsigned long page = (unsigned long)ptable & PAGE_MASK;
unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
dp = PD_PTABLE(page);
if (PD_MARKBITS (dp) & mask)
panic ("table already free!");
PD_MARKBITS (dp) |= mask;
if (PD_MARKBITS(dp) == 0xff) {
/* all tables in page are free, free page */
list_del(dp);
cache_page((void *)page);
free_page (page);
return 1;
} else if (ptable_list.next != dp) {
/*
* move this descriptor to the front of the list, since
* it has one or more free tables.
*/
list_move(dp, &ptable_list);
}
return 0;
}
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
@@ -45,34 +45,185 @@ unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
/*
* Motorola 680x0 user's manual recommends using uncached memory for address
* translation tables.
*
* Seeing how the MMU can be external on (some of) these chips, that seems like
* a very important recommendation to follow. Provide some helpers to combat
* 'variation' amongst the users of this.
*/
void mmu_page_ctor(void *page)
{
__flush_page_to_ram(page);
flush_tlb_kernel_page(page);
nocache_page(page);
}
void mmu_page_dtor(void *page)
{
cache_page(page);
}
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
struct page instead of separately kmalloced struct. Stolen from
arch/sparc/mm/srmmu.c ... */
typedef struct list_head ptable_desc;
static struct list_head ptable_list[2] = {
LIST_HEAD_INIT(ptable_list[0]),
LIST_HEAD_INIT(ptable_list[1]),
};
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
static const int ptable_shift[2] = {
7+2, /* PGD, PMD */
6+2, /* PTE */
};
#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
void __init init_pointer_table(void *table, int type)
{
ptable_desc *dp;
unsigned long ptable = (unsigned long)table;
unsigned long page = ptable & PAGE_MASK;
unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
dp = PD_PTABLE(page);
if (!(PD_MARKBITS(dp) & mask)) {
PD_MARKBITS(dp) = ptable_mask(type);
list_add(dp, &ptable_list[type]);
}
PD_MARKBITS(dp) &= ~mask;
pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
/* unreserve the page so it's possible to free that page */
__ClearPageReserved(PD_PAGE(dp));
init_page_count(PD_PAGE(dp));
return;
}
void *get_pointer_table(int type)
{
ptable_desc *dp = ptable_list[type].next;
unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
unsigned int tmp, off;
/*
* For a pointer table for a user process address space, a
* table is taken from a page allocated for the purpose. Each
* page can hold 8 pointer tables. The page is remapped in
* virtual address space to be noncacheable.
*/
if (mask == 0) {
void *page;
ptable_desc *new;
if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
return NULL;
if (type == TABLE_PTE) {
/*
* m68k doesn't have SPLIT_PTE_PTLOCKS for not having
* SMP.
*/
pgtable_pte_page_ctor(virt_to_page(page));
}
mmu_page_ctor(page);
new = PD_PTABLE(page);
PD_MARKBITS(new) = ptable_mask(type) - 1;
list_add_tail(new, dp);
return (pmd_t *)page;
}
for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
;
PD_MARKBITS(dp) = mask & ~tmp;
if (!PD_MARKBITS(dp)) {
/* move to end of list */
list_move_tail(dp, &ptable_list[type]);
}
return page_address(PD_PAGE(dp)) + off;
}
int free_pointer_table(void *table, int type)
{
ptable_desc *dp;
unsigned long ptable = (unsigned long)table;
unsigned long page = ptable & PAGE_MASK;
unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
dp = PD_PTABLE(page);
if (PD_MARKBITS (dp) & mask)
panic ("table already free!");
PD_MARKBITS (dp) |= mask;
if (PD_MARKBITS(dp) == ptable_mask(type)) {
/* all tables in page are free, free page */
list_del(dp);
mmu_page_dtor((void *)page);
if (type == TABLE_PTE)
pgtable_pte_page_dtor(virt_to_page(page));
free_page (page);
return 1;
} else if (ptable_list[type].next != dp) {
/*
* move this descriptor to the front of the list, since
* it has one or more free tables.
*/
list_move(dp, &ptable_list[type]);
}
return 0;
}
/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;
extern unsigned long availmem;
static pte_t *last_pte_table __initdata = NULL;
static pte_t * __init kernel_page_table(void)
{
pte_t *ptablep;
pte_t *pte_table = last_pte_table;
ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!ptablep)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_table) {
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
}
clear_page(ptablep);
__flush_page_to_ram(ptablep);
flush_tlb_kernel_page(ptablep);
nocache_page(ptablep);
clear_page(pte_table);
mmu_page_ctor(pte_table);
return ptablep;
last_pte_table = pte_table;
}
last_pte_table += PTRS_PER_PTE;
return pte_table;
}
static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;
static pmd_t *last_pmd_table __initdata = NULL;
static pmd_t * __init kernel_ptr_table(void)
{
if (!last_pgtable) {
if (!last_pmd_table) {
unsigned long pmd, last;
int i;
@@ -91,33 +242,29 @@ static pmd_t * __init kernel_ptr_table(void)
last = pmd;
}
last_pgtable = (pmd_t *)last;
last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
printk("kernel_ptr_init: %p\n", last_pgtable);
printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
}
last_pgtable += PTRS_PER_PMD;
if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
last_pmd_table += PTRS_PER_PMD;
if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
if (!last_pgtable)
if (!last_pmd_table)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
clear_page(last_pgtable);
__flush_page_to_ram(last_pgtable);
flush_tlb_kernel_page(last_pgtable);
nocache_page(last_pgtable);
clear_page(last_pmd_table);
mmu_page_ctor(last_pmd_table);
}
return last_pgtable;
return last_pmd_table;
}
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
unsigned long physaddr, virtaddr, size;
pgd_t *pgd_dir;
p4d_t *p4d_dir;
@@ -135,21 +282,21 @@ static void __init map_node(int node)
while (size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
if (!(virtaddr & (PMD_SIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
if (virtaddr && CPU_IS_020_OR_030) {
if (!(virtaddr & (ROOTTREESIZE-1)) &&
size >= ROOTTREESIZE) {
if (!(virtaddr & (PGDIR_SIZE-1)) &&
size >= PGDIR_SIZE) {
#ifdef DEBUG
printk ("[very early term]");
#endif
pgd_val(*pgd_dir) = physaddr;
size -= ROOTTREESIZE;
virtaddr += ROOTTREESIZE;
physaddr += ROOTTREESIZE;
size -= PGDIR_SIZE;
virtaddr += PGDIR_SIZE;
physaddr += PGDIR_SIZE;
continue;
}
}
@@ -169,24 +316,23 @@ static void __init map_node(int node)
#ifdef DEBUG
printk ("[early term]");
#endif
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
pmd_val(*pmd_dir) = physaddr;
physaddr += PMD_SIZE;
} else {
int i;
#ifdef DEBUG
printk ("[zero map]");
#endif
zero_pgtable = kernel_ptr_table();
pte_dir = (pte_t *)zero_pgtable;
pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
_PAGE_TABLE | _PAGE_ACCESSED;
pte_dir = kernel_page_table();
pmd_set(pmd_dir, pte_dir);
pte_val(*pte_dir++) = 0;
physaddr += PAGE_SIZE;
for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
pte_val(*pte_dir++) = physaddr;
}
size -= PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PMD_SIZE;
virtaddr += PMD_SIZE;
} else {
if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
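The per-type bookkeeping above reduces to simple arithmetic: PGD and PMD tables are 512 bytes, PTE tables are 256 bytes, so a 4 KiB page holds 8 of the former or 16 of the latter, which is also why PD_MARKBITS() grows from an unsigned char to an unsigned int. A stand-alone check mirroring ptable_shift[] (illustrative user-space code, not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int pgd_bytes = 1u << (7 + 2);   /* 128 entries * 4 bytes = 512 */
	unsigned int pte_bytes = 1u << (6 + 2);   /*  64 entries * 4 bytes = 256 */

	unsigned int pgd_mask = (1u << (page_size / pgd_bytes)) - 1;
	unsigned int pte_mask = (1u << (page_size / pte_bytes)) - 1;

	assert(pgd_mask == 0xff);      /* 8 PGD/PMD tables per page             */
	assert(pte_mask == 0xffff);    /* 16 PTE tables per page, needs >8 bits */

	printf("PGD/PMD mask %#x, PTE mask %#x\n", pgd_mask, pte_mask);
	return 0;
}

Note that the retained "Each page can hold 8 pointer tables" comment in get_pointer_table() now only strictly describes the PGD/PMD case; a page of PTE tables holds 16.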