Commit ee3ddbbd authored by Andrew Morton, committed by Linus Torvalds

[PATCH] pgd_ctor update

From wli

A moment's reflection on the subject suggests to me it's worthwhile to
generalize pgd_ctor support so it works (without #ifdefs!) on both PAE
and non-PAE. This tiny tweak is actually more noticeably beneficial
on non-PAE systems, though only because pgd_alloc() is more visible there;
the most likely reason it's less visible on PAE is "other overhead".
It looks particularly nice since it removes more code than it adds.

Touch-tested on NUMA-Q (PAE); OFTC #kn testers are covering the non-PAE case.
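The payoff here comes from the slab constructor contract: a ctor runs when a
slab page is first populated with objects, not on every allocation, and objects
must be handed back to the cache in their constructed state. A pgd that cycles
through the cache therefore keeps its kernel mappings across alloc/free, and
pgd_alloc() on the hot path degenerates into a plain object allocation. A
minimal sketch of the pattern with a hypothetical cache (illustrative only, not
code from this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static kmem_cache_t *demo_cache;	/* hypothetical cache */

/* Runs once when a fresh slab page is populated, not per-allocation. */
static void demo_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
{
	memset(obj, 0, 256);		/* expensive init paid once per object */
}

static int __init demo_cache_init(void)
{
	demo_cache = kmem_cache_create("demo", 256, 0,
			SLAB_HWCACHE_ALIGN, demo_ctor, NULL);
	if (!demo_cache)
		panic("demo_cache_init(): cannot create demo cache");
	return 0;
}

The flip side of the contract is that pgd_free() must return objects in
constructed state, which is why the new code below clears the user entries it
dirtied rather than leaving that to the next pgd_alloc().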
parent a85cb652
@@ -508,32 +508,36 @@ void __init mem_init(void)
 #endif
 }
 
-#if CONFIG_X86_PAE
-
 #include <linux/slab.h>
 
-kmem_cache_t *pae_pmd_cachep;
-kmem_cache_t *pae_pgd_cachep;
+kmem_cache_t *pmd_cache;
+kmem_cache_t *pgd_cache;
 
-void pae_pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pae_pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
 
 void __init pgtable_cache_init(void)
 {
+	if (PTRS_PER_PMD > 1) {
+		pmd_cache = kmem_cache_create("pae_pmd",
+					PTRS_PER_PMD*sizeof(pmd_t),
+					0,
+					SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
+					pmd_ctor,
+					NULL);
+		if (!pmd_cache)
+			panic("pgtable_cache_init(): cannot create pmd cache");
+	}
 	/*
 	 * PAE pgds must be 16-byte aligned:
 	 */
-	pae_pmd_cachep = kmem_cache_create("pae_pmd", 4096, 0,
-		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pae_pmd_ctor, NULL);
-	if (!pae_pmd_cachep)
-		panic("init_pae(): cannot allocate pae_pmd SLAB cache");
-
-	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
-		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pae_pgd_ctor, NULL);
-	if (!pae_pgd_cachep)
-		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
+	pgd_cache = kmem_cache_create("pgd", PTRS_PER_PGD*sizeof(pgd_t), 0,
+			SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pgd_ctor, NULL);
+	if (!pgd_cache)
+		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
-#endif
 
 /* Put this after the callers, so that it cannot be inlined */
 static int do_test_wp_bit(void)
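For reference, the object sizes those two kmem_cache_create() calls work out
to, given the i386 page-table layout of this era (values inferred from the
headers, not spelled out in the patch):

/*
 *             PTRS_PER_PGD  sizeof(pgd_t)  pgd object   PTRS_PER_PMD  pmd object
 *   PAE              4            8          32 bytes        512      4096 bytes
 *   non-PAE       1024            4        4096 bytes          1          --
 *
 * On non-PAE kernels PTRS_PER_PMD == 1, so the pmd cache is never created
 * and the page-sized pgd cache replaces the old __get_free_page() path in
 * pgd_alloc(); the 32-byte PAE pgd objects match the old pae_pgd cache and
 * satisfy the 16-byte alignment noted in the comment above.
 */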
@@ -166,19 +166,20 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
 
-#if CONFIG_X86_PAE
-
-extern kmem_cache_t *pae_pmd_cachep;
+extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *pgd_cache;
 
-void pae_pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
+void pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
 {
 	clear_page(__pmd);
 }
 
-void pae_pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
+void pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
 {
 	pgd_t *pgd = __pgd;
 
+	if (PTRS_PER_PMD == 1)
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+
 	memcpy(pgd + USER_PTRS_PER_PGD,
 		swapper_pg_dir + USER_PTRS_PER_PGD,
 		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
@@ -187,59 +188,38 @@ void pae_pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, SLAB_KERNEL);
+	pgd_t *pgd = kmem_cache_alloc(pgd_cache, SLAB_KERNEL);
 
-	if (!pgd)
+	if (PTRS_PER_PMD == 1)
+		return pgd;
+	else if (!pgd)
 		return NULL;
 
 	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-		pmd_t *pmd = kmem_cache_alloc(pae_pmd_cachep, SLAB_KERNEL);
+		pmd_t *pmd = kmem_cache_alloc(pmd_cache, SLAB_KERNEL);
 		if (!pmd)
 			goto out_oom;
-		else if ((unsigned long)pmd & ~PAGE_MASK) {
-			printk("kmem_cache_alloc did wrong! death ensues!\n");
-			goto out_oom;
-		}
 		set_pgd(pgd + i, __pgd(1 + __pa((unsigned long long)((unsigned long)pmd))));
 	}
 	return pgd;
 out_oom:
 	for (i--; i >= 0; --i)
-		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
-	kmem_cache_free(pae_pgd_cachep, (void *)pgd);
+		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	kmem_cache_free(pgd_cache, (void *)pgd);
 	return NULL;
 }
 
 void pgd_free(pgd_t *pgd)
 {
 	int i;
 
-	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
-		set_pgd(pgd + i, __pgd(0));
-	}
-	kmem_cache_free(pae_pgd_cachep, (void *)pgd);
-}
-
-#else
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-
-	if (pgd) {
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	if (PTRS_PER_PMD > 1) {
+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+			set_pgd(pgd + i, __pgd(0));
+		}
 	}
-	return pgd;
-}
-
-void pgd_free(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
+	kmem_cache_free(pgd_cache, (void *)pgd);
 }
-
-#endif /* CONFIG_X86_PAE */
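In the PAE branch above, each user pgd entry doubles as storage for its pmd's
location: set_pgd() stores the pmd's physical address with bit 0 (the present
bit, numerically 1) set, and both the error and free paths invert that with
__va(pgd_val(pgd[i]) - 1). A sketch of the round trip, with hypothetical helper
names that are not kernel API:

/* Illustrative helpers only -- the patch open-codes both directions. */
static inline pgd_t pmd_to_pgd_entry(pmd_t *pmd)
{
	/* physical address of the pmd, with the low (present) bit set */
	return __pgd(1 + __pa((unsigned long long)(unsigned long)pmd));
}

static inline pmd_t *pgd_entry_to_pmd(pgd_t entry)
{
	/* strip the present bit, map back to a kernel virtual address */
	return (pmd_t *)__va(pgd_val(entry) - 1);
}

This round trip is also what the dropped alignment check was guarding: a pmd
from the page-sized cache is expected to be page-aligned, so only the low bit
carries flag state.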
@@ -115,6 +115,4 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 	return __pmd(((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
 }
 
-extern struct kmem_cache_s *pae_pgd_cachep;
-
 #endif /* _I386_PGTABLE_3LEVEL_H */
@@ -41,21 +41,12 @@ extern unsigned long empty_zero_page[1024];
 #ifndef __ASSEMBLY__
 #if CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
-
-/*
- * Need to initialise the X86 PAE caches
- */
-extern void pgtable_cache_init(void);
-
 #else
 # include <asm/pgtable-2level.h>
+#endif
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-#endif
+void pgtable_cache_init(void);
 #endif
 
 #define __beep() asm("movb $0x3,%al; outb %al,$0x61")