Commit f1d1a842 authored by Christoph Lameter, committed by Linus Torvalds

SLUB: i386 support

SLUB cannot run on i386 at this point because i386 uses the page->private and
page->index field of slab pages for the pgd cache.

Make SLUB run on i386 by replacing the pgd slab cache with a quicklist.
Limit the changes as much as possible. Leave the improvised linked list in place
etc etc. This has been working here for a couple of weeks now.
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8df767dd
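
For context, the quicklist used here is a small per-CPU cache of already-constructed, page-sized allocations: quicklist_alloc() reuses a parked page when one is available (skipping the constructor), quicklist_free() parks the page instead of handing it back to the page allocator, and quicklist_trim() -- called from check_pgt_cache() in the idle loop below -- releases surplus pages. The user-space sketch below only models that idea; every name in it (ql_alloc, ql_free, ql_trim, the stub ctor/dtor, the single global list) is invented for illustration, while the real kernel API is quicklist_alloc()/quicklist_free()/quicklist_trim() from <linux/quicklist.h>, used exactly as shown in the diff.

/* Hypothetical user-space model of a quicklist -- illustration only. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct qpage {			/* a parked, already-constructed page */
	struct qpage *next;
};

static struct qpage *quicklist;	/* one global list; the kernel keeps these per CPU */
static int nr_cached;

static void *ql_alloc(void (*ctor)(void *))
{
	struct qpage *p = quicklist;

	if (p) {		/* reuse: the constructor already ran on this page */
		quicklist = p->next;
		nr_cached--;
		return p;
	}
	p = malloc(PAGE_SIZE);	/* fresh page: run the constructor once */
	if (p && ctor)
		ctor(p);
	return p;
}

static void ql_free(void *page)
{
	struct qpage *p = page;	/* park the page, keeping its constructed state */

	p->next = quicklist;
	quicklist = p;
	nr_cached++;
}

static void ql_trim(int min_keep, void (*dtor)(void *))
{
	while (nr_cached > min_keep) {	/* release surplus pages for real */
		struct qpage *p = quicklist;

		quicklist = p->next;
		nr_cached--;
		if (dtor)
			dtor(p);
		free(p);
	}
}

static void pgd_ctor_stub(void *pgd) { printf("ctor %p\n", pgd); }
static void pgd_dtor_stub(void *pgd) { printf("dtor %p\n", pgd); }

int main(void)
{
	void *a = ql_alloc(pgd_ctor_stub);	/* ctor runs: page is new */
	ql_free(a);
	void *b = ql_alloc(pgd_ctor_stub);	/* no ctor: page came off the list */
	ql_free(b);
	ql_trim(0, pgd_dtor_stub);		/* analogous to check_pgt_cache() */
	return 0;
}

Like the old pgd slab cache, this keeps pgd pages in a constructed state between uses; unlike the slab cache, it does not rely on the page->private/page->index fields of the underlying pages, which is exactly the metadata SLUB needs for itself.
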
@@ -55,6 +55,10 @@ config ZONE_DMA
 	bool
 	default y
 
+config QUICKLIST
+	bool
+	default y
+
 config SBUS
 	bool
 
@@ -79,10 +83,6 @@ config ARCH_MAY_HAVE_PC_FDC
 	bool
 	default y
 
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-
 config DMI
 	bool
 	default y

@@ -186,6 +186,7 @@ void cpu_idle(void)
 			if (__get_cpu_var(cpu_idle_state))
 				__get_cpu_var(cpu_idle_state) = 0;
 
+			check_pgt_cache();
 			rmb();
 			idle = pm_idle;

@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }

@@ -740,7 +740,6 @@ int remove_memory(u64 start, u64 size)
 EXPORT_SYMBOL_GPL(remove_memory);
 #endif
 
-struct kmem_cache *pgd_cache;
 struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
@@ -764,12 +763,6 @@ void __init pgtable_cache_init(void)
 			pgd_size = PAGE_SIZE;
 		}
 	}
-	pgd_cache = kmem_cache_create("pgd",
-				pgd_size,
-				pgd_size,
-				SLAB_PANIC,
-				pgd_ctor,
-				(!SHARED_KERNEL_PMD) ? pgd_dtor : NULL);
 }
 
 /*

@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/quicklist.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -205,8 +206,6 @@ void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
  * -- wli
  */
 DEFINE_SPINLOCK(pgd_lock);
@@ -232,9 +231,11 @@ static inline void pgd_list_del(pgd_t *pgd)
 		set_page_private(next, (unsigned long)pprev);
 }
 
 #if (PTRS_PER_PMD == 1)
 /* Non-PAE pgd constructor */
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	unsigned long flags;
@@ -256,7 +257,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 }
 
 #else /* PTRS_PER_PMD > 1 */
 /* PAE pgd constructor */
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	/* PAE, kernel PMD may be shared */
@@ -275,11 +276,12 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 }
 #endif /* PTRS_PER_PMD */
 
-void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_dtor(void *pgd)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
-	BUG_ON(SHARED_KERNEL_PMD);
+	if (SHARED_KERNEL_PMD)
+		return;
 
 	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
 	spin_lock_irqsave(&pgd_lock, flags);
@@ -321,7 +323,7 @@ static void pmd_cache_free(pmd_t *pmd, int idx)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
 
 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;
@@ -344,7 +346,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
 		pmd_cache_free(pmd, i);
 	}
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 	return NULL;
 }
@@ -361,5 +363,11 @@ void pgd_free(pgd_t *pgd)
 			pmd_cache_free(pmd, i);
 		}
 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 }
+
+void check_pgt_cache(void)
+{
+	quicklist_trim(0, pgd_dtor, 25, 16);
+}
@@ -65,6 +65,4 @@ do { \
 #define pud_populate(mm, pmd, pte) BUG()
 #endif
 
-#define check_pgt_cache() do { } while (0)
-
 #endif /* _I386_PGALLOC_H */

@@ -35,17 +35,16 @@ struct vm_area_struct;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern struct kmem_cache *pgd_cache;
 extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
 
+void check_pgt_cache(void);
 void pmd_ctor(void *, struct kmem_cache *, unsigned long);
-void pgd_ctor(void *, struct kmem_cache *, unsigned long);
-void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
 
 /*
  * The Linux x86 paging architecture is 'compile-time dual-mode', it
  * implements both the traditional 2-level x86 page tables and the