Commit fd21015e authored by Anton Blanchard

ppc64: slabify ppc64 pagetables, from Bill Irwin
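
The patch replaces the per-allocation __get_free_page() + clear_page()
pattern with a single slab cache, "zero_cache", whose constructor zeroes
each PAGE_SIZE object. The constructor only runs when the slab populates
a fresh object, so the scheme assumes a page table page is already clear
(all entries removed) by the time it is freed back to the cache; every
kmem_cache_alloc() then returns an already-zeroed page. A minimal sketch
of the pattern follows; it is not part of the patch, and the pt_cache /
alloc_pagetable() / free_pagetable() names are illustrative only:

/*
 * Sketch only: illustrates the ctor-zeroed slab cache pattern this
 * commit applies; names below are hypothetical, not from the tree.
 */
#include <linux/slab.h>
#include <linux/mm.h>

static kmem_cache_t *pt_cache;		/* cache of pre-zeroed pages */

/* runs when the slab constructs a new object, not on every allocation */
static void pt_ctor(void *obj, kmem_cache_t *cache, unsigned long flags)
{
	memset(obj, 0, PAGE_SIZE);
}

static void pt_cache_init(void)
{
	pt_cache = kmem_cache_create("pt_cache", PAGE_SIZE, 0,
				     SLAB_HWCACHE_ALIGN, pt_ctor, NULL);
}

static void *alloc_pagetable(void)
{
	/*
	 * Assumed to come back zeroed: the ctor cleared it, and it is
	 * expected to be clear again when freed, so no clear_page() here.
	 */
	return kmem_cache_alloc(pt_cache, GFP_KERNEL);
}

static void free_pagetable(void *pt)
{
	kmem_cache_free(pt_cache, pt);
}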

parent 3244d5cc
@@ -726,3 +726,22 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
 		    0x300, local);
 }
+
+kmem_cache_t *zero_cache;
+
+static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+{
+	memset(pte, 0, PAGE_SIZE);
+}
+
+void pgtable_cache_init(void)
+{
+	zero_cache = kmem_cache_create("zero",
+				       PAGE_SIZE,
+				       0,
+				       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
+				       zero_ctor,
+				       NULL);
+	if (!zero_cache)
+		panic("pgtable_cache_init(): could not create zero_cache!\n");
+}
@@ -2,8 +2,11 @@
 #define _PPC64_PGALLOC_H
 
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <asm/processor.h>
 
+extern kmem_cache_t *zero_cache;
+
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -14,16 +17,13 @@
 static inline pgd_t *
 pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-	if (pgd != NULL)
-		clear_page(pgd);
-	return pgd;
+	return kmem_cache_alloc(zero_cache, GFP_KERNEL);
 }
 
 static inline void
 pgd_free(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+	kmem_cache_free(zero_cache, pgd);
 }
 
 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
@@ -31,18 +31,13 @@ pgd_free(pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	pmd_t *pmd;
-
-	pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pmd)
-		clear_page(pmd);
-	return pmd;
+	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void
 pmd_free(pmd_t *pmd)
 {
-	free_page((unsigned long)pmd);
+	kmem_cache_free(zero_cache, pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
@@ -54,12 +49,7 @@ pmd_free(pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
-	return pte;
+	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *
@@ -76,7 +66,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline void
 pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	kmem_cache_free(zero_cache, pte);
 }
 
 #define pte_free(pte_page)	pte_free_kernel(page_address(pte_page))
@@ -395,10 +395,7 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 #define io_remap_page_range remap_page_range
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
+void pgtable_cache_init(void);
 
 extern void hpte_init_pSeries(void);
 extern void hpte_init_iSeries(void);