Commit 2007b52c authored by Linus Torvalds

Revert pmd/pgd slabification. wli will fix it properly

Cset exclude: akpm@digeo.com|ChangeSet|20030204165956|06074
Cset exclude: akpm@digeo.com|ChangeSet|20030204165949|06077
parent 1dbc2a79
arch/i386/mm/init.c

@@ -505,36 +505,20 @@ void __init mem_init(void)
 #endif
 }
 
-#include <linux/slab.h>
-
-kmem_cache_t *pmd_cache;
-kmem_cache_t *pgd_cache;
-
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+#if CONFIG_X86_PAE
+
+struct kmem_cache_s *pae_pgd_cachep;
 
 void __init pgtable_cache_init(void)
 {
-	if (PTRS_PER_PMD > 1) {
-		pmd_cache = kmem_cache_create("pae_pmd",
-					PTRS_PER_PMD*sizeof(pmd_t),
-					0,
-					SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
-					pmd_ctor,
-					NULL);
-		if (!pmd_cache)
-			panic("pgtable_cache_init(): cannot create pmd cache");
-	}
 	/*
 	 * PAE pgds must be 16-byte aligned:
 	 */
-	pgd_cache = kmem_cache_create("pgd", PTRS_PER_PGD*sizeof(pgd_t), 0,
-			SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pgd_ctor, NULL);
-	if (!pgd_cache)
-		panic("pgtable_cache_init(): Cannot create pgd cache");
+	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
+		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+	if (!pae_pgd_cachep)
+		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
 }
+#endif
 
 /*
  * This function cannot be __init, since exceptions don't work in that
...
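A note on the hard-coded object size of 32 in the restored kmem_cache_create() call above: with PAE, the top-level page directory holds four 8-byte entries, so PTRS_PER_PGD * sizeof(pgd_t) is exactly 32 bytes, and SLAB_MUST_HWCACHE_ALIGN keeps each object aligned as the comment requires. The standalone userspace sketch below (not part of this commit; pae_pgd_t and PAE_PTRS_PER_PGD are illustrative stand-ins for the kernel's pgd_t and PTRS_PER_PGD) just checks that arithmetic:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long pae_pgd_t;   /* one 64-bit PAE pgd entry */
#define PAE_PTRS_PER_PGD 4              /* four page-directory pointers */

int main(void)
{
	size_t pgd_bytes = PAE_PTRS_PER_PGD * sizeof(pae_pgd_t);

	/* Matches kmem_cache_create("pae_pgd", 32, ...) above. */
	assert(pgd_bytes == 32);
	printf("PAE pgd object size: %zu bytes\n", pgd_bytes);
	return 0;
}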
arch/i386/mm/pgtable.c

@@ -151,47 +151,30 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
 
-extern kmem_cache_t *pmd_cache;
-extern kmem_cache_t *pgd_cache;
-
-void pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
-{
-	clear_page(__pmd);
-}
-
-void pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
-{
-	pgd_t *pgd = __pgd;
-
-	if (PTRS_PER_PMD == 1)
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-
-	memcpy(pgd + USER_PTRS_PER_PGD,
-		swapper_pg_dir + USER_PTRS_PER_PGD,
-		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
+#if CONFIG_X86_PAE
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pgd_cache, SLAB_KERNEL);
+	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
 
-	if (PTRS_PER_PMD == 1)
-		return pgd;
-	else if (!pgd)
-		return NULL;
-
-	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-		pmd_t *pmd = kmem_cache_alloc(pmd_cache, SLAB_KERNEL);
-		if (!pmd)
-			goto out_oom;
-		set_pgd(pgd + i, __pgd(1 + __pa((unsigned long long)((unsigned long)pmd))));
+	if (pgd) {
+		for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+			unsigned long pmd = __get_free_page(GFP_KERNEL);
+			if (!pmd)
+				goto out_oom;
+			clear_page(pmd);
+			set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+		}
+		memcpy(pgd + USER_PTRS_PER_PGD,
+			swapper_pg_dir + USER_PTRS_PER_PGD,
+			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 	}
 	return pgd;
 
 out_oom:
-	for (i--; i >= 0; --i)
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-	kmem_cache_free(pgd_cache, (void *)pgd);
+	for (i--; i >= 0; i--)
+		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+	kmem_cache_free(pae_pgd_cachep, pgd);
 	return NULL;
 }
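The restored pgd_alloc()/pgd_free() above lean on a small encoding trick: each pgd entry stores the physical address of a pmd page with bit 0 (the Present flag) set, via __pgd(1 + __pa(pmd)), and the free paths recover the page with __va(pgd_val(pgd[i]) - 1). A standalone userspace sketch of that round trip (not part of this commit; aligned_alloc stands in for __get_free_page(), and plain casts stand in for the kernel's __pa/__va conversions):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	/* A page-aligned allocation standing in for __get_free_page(). */
	void *pmd = aligned_alloc(4096, 4096);
	uintptr_t entry;

	assert(pmd != NULL);

	/* set_pgd(pgd + i, __pgd(1 + __pa(pmd))): address plus Present bit. */
	entry = (uintptr_t)pmd + 1;
	assert((entry & 1) != 0);

	/* free_page((unsigned long)__va(pgd_val(pgd[i]) - 1)): undo the +1. */
	assert((void *)(entry - 1) == pmd);

	free(pmd);
	return 0;
}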
@@ -199,12 +182,30 @@ void pgd_free(pgd_t *pgd)
 {
 	int i;
 
-	if (PTRS_PER_PMD > 1) {
-		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-			set_pgd(pgd + i, __pgd(0));
-		}
-	}
-
-	kmem_cache_free(pgd_cache, (void *)pgd);
+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+	kmem_cache_free(pae_pgd_cachep, pgd);
+}
+
+#else
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+	if (pgd) {
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+			swapper_pg_dir + USER_PTRS_PER_PGD,
+			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+	return pgd;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
 }
+#endif /* CONFIG_X86_PAE */
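Both pgd_alloc() variants above finish the same way: clear the user portion of the new page directory, then copy the kernel mappings from swapper_pg_dir so every process shares the kernel half of the address space. A standalone sketch of that memset/memcpy pattern (not part of this commit; the sizes match a non-PAE i386 with the usual 3GB/1GB split, and master stands in for swapper_pg_dir):

#include <assert.h>
#include <string.h>

#define PTRS_PER_PGD      1024  /* non-PAE i386: 1024 entries per pgd */
#define USER_PTRS_PER_PGD  768  /* first 3GB belongs to user space */

static unsigned long master[PTRS_PER_PGD];  /* stands in for swapper_pg_dir */

int main(void)
{
	unsigned long pgd[PTRS_PER_PGD];

	master[PTRS_PER_PGD - 1] = 0x12345;  /* pretend kernel mapping */

	/* Same shape as pgd_alloc(): zero the user half, share the kernel half. */
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd[0]));
	memcpy(pgd + USER_PTRS_PER_PGD,
	       master + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd[0]));

	assert(pgd[0] == 0);
	assert(pgd[PTRS_PER_PGD - 1] == 0x12345);
	return 0;
}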
include/asm-i386/pgalloc.h

@@ -20,11 +20,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
  * Allocate and free page tables.
  */
 
-pgd_t *pgd_alloc(struct mm_struct *);
-void pgd_free(pgd_t *pgd);
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
 
-pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
 
 static inline void pte_free_kernel(pte_t *pte)
 {
...
include/asm-i386/pgtable-3level.h

@@ -123,4 +123,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
 #define PTE_FILE_MAX_BITS	32
 
+extern struct kmem_cache_s *pae_pgd_cachep;
+
 #endif /* _I386_PGTABLE_3LEVEL_H */
include/asm-i386/pgtable.h

@@ -41,12 +41,21 @@ extern unsigned long empty_zero_page[1024];
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
+
+/*
+ * Need to initialise the X86 PAE caches
+ */
+extern void pgtable_cache_init(void);
+
 #else
 # include <asm/pgtable-2level.h>
-#endif
 
-void pgtable_cache_init(void);
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()	do { } while (0)
+
+#endif
 #endif
 
 #define PMD_SIZE	(1UL << PMD_SHIFT)
...
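The restored #define pgtable_cache_init() do { } while (0) is the usual kernel idiom for a no-op statement macro: the do/while body expands to exactly one statement and will not compile without its trailing semicolon, so call sites read and nest like a real function call. A standalone sketch (not part of this commit):

#include <stdio.h>

/* No-op statement macro, as in the non-PAE branch of pgtable.h. */
#define pgtable_cache_init()	do { } while (0)

int main(void)
{
	int pae = 0;

	/* Usable in an unbraced branch, and the trailing ';' is required,
	 * exactly as if it were a real function call. */
	if (pae)
		pgtable_cache_init();
	else
		printf("no page table caches to initialise\n");
	return 0;
}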