Commit 3fe5f54e authored by Manfred Spraul, committed by Linus Torvalds

[PATCH] slab: reduce fragmentation due to kmem_cache_alloc_node

Attached is a patch that fixes the fragmentation that Badri noticed with
kmem_cache_alloc_node.

kmem_cache_alloc_node tries to allocate memory from a given NUMA node
(a brief, illustrative caller sketch follows the list below).  The
current implementation contains two bugs:

- The node-aware code was used even for !CONFIG_NUMA systems.  Fix: an
  inline function that redefines kmem_cache_alloc_node as
  kmem_cache_alloc for !CONFIG_NUMA.

- The code always allocated a new slab for each allocation.  This
  caused severe fragmentation.  Fix: walk the slabp lists and search
  for a matching page instead of always allocating a new one.

In addition, the patch adds a new statistics field for node-local
allocations.  They should be rare - the codepath is quite slow,
especially compared to the normal kmem_cache_alloc.
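
For illustration only - the sketch below is not part of the patch.  It
shows a hypothetical caller (item_cachep, struct my_item and
alloc_item_on_node are invented names) using the interface as declared
in the diff: kmem_cache_alloc_node(cachep, nodeid) returns NULL on
failure, and on !CONFIG_NUMA kernels it now simply falls back to
kmem_cache_alloc(cachep, GFP_KERNEL).

struct my_item {                        /* hypothetical per-node object */
        int nodeid;
        /* ... payload ... */
};

/* Allocate one my_item, preferring memory on 'nodeid'.  'item_cachep'
 * is assumed to have been created elsewhere with kmem_cache_create(). */
static struct my_item *alloc_item_on_node(kmem_cache_t *item_cachep, int nodeid)
{
        struct my_item *p = kmem_cache_alloc_node(item_cachep, nodeid);

        if (!p)
                return NULL;            /* allocation failed */
        p->nodeid = nodeid;
        return p;
}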
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 69028004
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -61,7 +61,14 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, int);
+#ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
+#else
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node)
+{
+        return kmem_cache_alloc(cachep, GFP_KERNEL);
+}
+#endif
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -327,6 +327,7 @@ struct kmem_cache_s {
        unsigned long           reaped;
        unsigned long           errors;
        unsigned long           max_freeable;
+       unsigned long           node_allocs;
        atomic_t                allochit;
        atomic_t                allocmiss;
        atomic_t                freehit;
@@ -361,6 +362,7 @@ struct kmem_cache_s {
                                (x)->high_mark = (x)->num_active; \
                        } while (0)
 #define STATS_INC_ERR(x)       ((x)->errors++)
+#define STATS_INC_NODEALLOCS(x)        ((x)->node_allocs++)
 #define STATS_SET_FREEABLE(x, i) \
                                do { if ((x)->max_freeable < i) \
                                        (x)->max_freeable = i; \
@@ -378,6 +380,7 @@ struct kmem_cache_s {
 #define STATS_INC_REAPED(x)    do { } while (0)
 #define STATS_SET_HIGH(x)      do { } while (0)
 #define STATS_INC_ERR(x)       do { } while (0)
+#define STATS_INC_NODEALLOCS(x)        do { } while (0)
 #define STATS_SET_FREEABLE(x, i) \
                                do { } while (0)
@@ -1747,7 +1750,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow (kmem_cache_t * cachep, int flags)
+static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
 {
        struct slab     *slabp;
        void            *objp;
@@ -1798,7 +1801,7 @@ static int cache_grow (kmem_cache_t * cachep, int flags)
        /* Get mem for the objs. */
-       if (!(objp = kmem_getpages(cachep, flags, -1)))
+       if (!(objp = kmem_getpages(cachep, flags, nodeid)))
                goto failed;

        /* Get slab management. */
@@ -2032,7 +2035,7 @@ static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)

        if (unlikely(!ac->avail)) {
                int x;
-               x = cache_grow(cachep, flags);
+               x = cache_grow(cachep, flags, -1);

                // cache_grow can reenable interrupts, then ac could change.
                ac = ac_data(cachep);
@@ -2313,6 +2316,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
        return 0;
 }

+#ifdef CONFIG_NUMA
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -2325,69 +2329,80 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
  */
 void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 {
-       size_t offset;
+       int loop;
        void *objp;
        struct slab *slabp;
        kmem_bufctl_t next;

-       /* The main algorithms are not node aware, thus we have to cheat:
-        * We bypass all caches and allocate a new slab.
-        * The following code is a streamlined copy of cache_grow().
-        */
-
-       /* Get colour for the slab, and update the next value. */
-       spin_lock_irq(&cachep->spinlock);
-       offset = cachep->colour_next;
-       cachep->colour_next++;
-       if (cachep->colour_next >= cachep->colour)
-               cachep->colour_next = 0;
-       offset *= cachep->colour_off;
-       spin_unlock_irq(&cachep->spinlock);
-
-       /* Get mem for the objs. */
-       if (!(objp = kmem_getpages(cachep, GFP_KERNEL, nodeid)))
-               goto failed;
-
-       /* Get slab management. */
-       if (!(slabp = alloc_slabmgmt(cachep, objp, offset, GFP_KERNEL)))
-               goto opps1;
-
-       set_slab_attr(cachep, slabp, objp);
-       cache_init_objs(cachep, slabp, SLAB_CTOR_CONSTRUCTOR);
-
-       /* The first object is ours: */
+       for (loop = 0;;loop++) {
+               struct list_head *q;
+
+               objp = NULL;
+               check_irq_on();
+               spin_lock_irq(&cachep->spinlock);
+               /* walk through all partial and empty slab and find one
+                * from the right node */
+               list_for_each(q,&cachep->lists.slabs_partial) {
+                       slabp = list_entry(q, struct slab, list);
+
+                       if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
+                                       loop > 2)
+                               goto got_slabp;
+               }
+               list_for_each(q, &cachep->lists.slabs_free) {
+                       slabp = list_entry(q, struct slab, list);
+
+                       if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
+                                       loop > 2)
+                               goto got_slabp;
+               }
+               spin_unlock_irq(&cachep->spinlock);
+
+               local_irq_disable();
+               if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+                       local_irq_enable();
+                       return NULL;
+               }
+               local_irq_enable();
+       }
+got_slabp:
+       /* found one: allocate object */
+       check_slabp(cachep, slabp);
+       check_spinlock_acquired(cachep);
+
+       STATS_INC_ALLOCED(cachep);
+       STATS_INC_ACTIVE(cachep);
+       STATS_SET_HIGH(cachep);
+       STATS_INC_NODEALLOCS(cachep);
+
        objp = slabp->s_mem + slabp->free*cachep->objsize;
        slabp->inuse++;
        next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
        slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
 #endif
        slabp->free = next;
-
-       /* add the remaining objects into the cache */
-       spin_lock_irq(&cachep->spinlock);
        check_slabp(cachep, slabp);
-       STATS_INC_GROWN(cachep);
-       /* Make slab active. */
-       if (slabp->free == BUFCTL_END) {
-               list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_full));
-       } else {
-               list_add_tail(&slabp->list,
-                               &(list3_data(cachep)->slabs_partial));
-               list3_data(cachep)->free_objects += cachep->num-1;
-       }
+
+       /* move slabp to correct slabp list: */
+       list_del(&slabp->list);
+       if (slabp->free == BUFCTL_END)
+               list_add(&slabp->list, &cachep->lists.slabs_full);
+       else
+               list_add(&slabp->list, &cachep->lists.slabs_partial);
+
+       list3_data(cachep)->free_objects--;
        spin_unlock_irq(&cachep->spinlock);

        objp = cache_alloc_debugcheck_after(cachep, GFP_KERNEL, objp,
                                        __builtin_return_address(0));
        return objp;
-opps1:
-       kmem_freepages(cachep, objp);
-failed:
-       return NULL;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif

 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
@@ -2812,15 +2827,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                 * without _too_ many complaints.
                 */
 #if STATS
-               seq_puts(m, "slabinfo - version: 2.0 (statistics)\n");
+               seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
 #else
-               seq_puts(m, "slabinfo - version: 2.0\n");
+               seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
                seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
                seq_puts(m, " : tunables <batchcount> <limit> <sharedfactor>");
                seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
-               seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <freelimit>");
+               seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
+                               " <error> <maxfreeable> <freelimit> <nodeallocs>");
                seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
                seq_putc(m, '\n');
@@ -2911,10 +2927,11 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long errors = cachep->errors;
                unsigned long max_freeable = cachep->max_freeable;
                unsigned long free_limit = cachep->free_limit;
+               unsigned long node_allocs = cachep->node_allocs;

-               seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu",
+               seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu",
                        allocs, high, grown, reaped, errors,
-                       max_freeable, free_limit);
+                       max_freeable, free_limit, node_allocs);
        }
        /* cpu stats */
        {