Commit ed11d9eb authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] slab: consolidate code to free slabs from freelist

Post and discussion:
http://marc.theaimsgroup.com/?t=115074342800003&r=1&w=2

Code in __node_shrink() duplicates code in cache_reap().

Add a new function, drain_freelist(), that removes slabs whose objects are
all already free, and use it in various places.

This eliminates the __node_shrink() function and brings the interrupt-holdoff
reduction used in slab_free to the code that used to call __node_shrink().

[akpm@osdl.org: build fixes]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f8891e5e
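
As a reading aid for the diff below: the patch replaces the per-node
__node_shrink() loop with one helper, drain_freelist(), which unlinks up to a
requested number of completely free slabs and reports how many it actually
destroyed. The following is a minimal userspace sketch of just that control
flow on a toy circular list; the names toy_slab, toy_add and
toy_drain_freelist are invented for illustration, and the kernel version
additionally takes l3->list_lock around the unlink and drops it before
calling slab_destroy().

    /* Toy model of the drain_freelist() idea (not kernel code). */
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_slab {
            struct toy_slab *prev, *next;   /* list_head-style links */
    };

    /* circular list head, like an empty struct list_head */
    static struct toy_slab slabs_free = { &slabs_free, &slabs_free };

    static void toy_add(struct toy_slab *s)
    {
            s->next = &slabs_free;
            s->prev = slabs_free.prev;
            slabs_free.prev->next = s;
            slabs_free.prev = s;
    }

    static int toy_drain_freelist(int tofree)
    {
            int nr_freed = 0;

            while (nr_freed < tofree && slabs_free.prev != &slabs_free) {
                    /* take the slab at the tail (slabs_free.prev), as the patch does */
                    struct toy_slab *slabp = slabs_free.prev;

                    slabp->prev->next = slabp->next;   /* list_del() */
                    slabp->next->prev = slabp->prev;
                    free(slabp);                       /* stands in for slab_destroy() */
                    nr_freed++;
            }
            return nr_freed;
    }

    int main(void)
    {
            for (int i = 0; i < 10; i++)
                    toy_add(calloc(1, sizeof(struct toy_slab)));

            /* ask for 4 slabs, then for "everything": roughly the cache_reap()
             * and __cache_shrink() call patterns, respectively */
            printf("freed %d\n", toy_drain_freelist(4));    /* -> 4 */
            printf("freed %d\n", toy_drain_freelist(100));  /* -> 6 */
            return 0;
    }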
@@ -309,6 +309,13 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define SIZE_AC 1
 #define SIZE_L3 (1 + MAX_NUMNODES)
 
+static int drain_freelist(struct kmem_cache *cache,
+                       struct kmem_list3 *l3, int tofree);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+                       int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+
 /*
  * This function must be completely optimized away if a constant is passed to
  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
@@ -456,7 +463,7 @@ struct kmem_cache {
 #define STATS_DEC_ACTIVE(x)    ((x)->num_active--)
 #define STATS_INC_ALLOCED(x)   ((x)->num_allocations++)
 #define STATS_INC_GROWN(x)     ((x)->grown++)
-#define STATS_INC_REAPED(x)    ((x)->reaped++)
+#define STATS_ADD_REAPED(x,y)  ((x)->reaped += (y))
 #define STATS_SET_HIGH(x)                                              \
        do {                                                            \
                if ((x)->num_active > (x)->high_mark)                   \
@@ -480,7 +487,7 @@ struct kmem_cache {
 #define STATS_DEC_ACTIVE(x)    do { } while (0)
 #define STATS_INC_ALLOCED(x)   do { } while (0)
 #define STATS_INC_GROWN(x)     do { } while (0)
-#define STATS_INC_REAPED(x)    do { } while (0)
+#define STATS_ADD_REAPED(x,y)  do { } while (0)
 #define STATS_SET_HIGH(x)      do { } while (0)
 #define STATS_INC_ERR(x)       do { } while (0)
 #define STATS_INC_NODEALLOCS(x)        do { } while (0)
@@ -700,12 +707,6 @@ int slab_is_available(void)
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
-                       int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-static int __node_shrink(struct kmem_cache *cachep, int node);
-
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
        return cachep->array[smp_processor_id()];
@@ -1241,10 +1242,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;
-               spin_lock_irq(&l3->list_lock);
-               /* free slabs belonging to this node */
-               __node_shrink(cachep, node);
-               spin_unlock_irq(&l3->list_lock);
+               drain_freelist(cachep, l3, l3->free_objects);
        }
        mutex_unlock(&cache_chain_mutex);
        break;
@@ -2248,32 +2246,45 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
        }
 }
 
-static int __node_shrink(struct kmem_cache *cachep, int node)
+/*
+ * Remove slabs from the list of free slabs.
+ * Specify the number of slabs to drain in tofree.
+ *
+ * Returns the actual number of slabs released.
+ */
+static int drain_freelist(struct kmem_cache *cache,
+                       struct kmem_list3 *l3, int tofree)
 {
+       struct list_head *p;
+       int nr_freed;
        struct slab *slabp;
-       struct kmem_list3 *l3 = cachep->nodelists[node];
-       int ret;
 
-       for (;;) {
-               struct list_head *p;
+       nr_freed = 0;
+       while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
 
+               spin_lock_irq(&l3->list_lock);
                p = l3->slabs_free.prev;
-               if (p == &l3->slabs_free)
-                       break;
+               if (p == &l3->slabs_free) {
+                       spin_unlock_irq(&l3->list_lock);
+                       goto out;
+               }
 
-               slabp = list_entry(l3->slabs_free.prev, struct slab, list);
+               slabp = list_entry(p, struct slab, list);
 #if DEBUG
                BUG_ON(slabp->inuse);
 #endif
                list_del(&slabp->list);
-
-               l3->free_objects -= cachep->num;
+               /*
+                * Safe to drop the lock. The slab is no longer linked
+                * to the cache.
+                */
+               l3->free_objects -= cache->num;
                spin_unlock_irq(&l3->list_lock);
-               slab_destroy(cachep, slabp);
-               spin_lock_irq(&l3->list_lock);
+               slab_destroy(cache, slabp);
+               nr_freed++;
        }
-       ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
-       return ret;
+out:
+       return nr_freed;
 }
 
 static int __cache_shrink(struct kmem_cache *cachep)
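
One design note on the new helper, with a tiny self-contained check (the
numbers below are made up): the cpu-hotplug path above and the
__cache_shrink() path below pass l3->free_objects as tofree. That value is
compared against nr_freed, which counts slabs, but every completely free slab
carries cache->num >= 1 objects, so the object count is never smaller than
the number of free slabs and the whole free list gets drained.

    #include <assert.h>

    int main(void)
    {
            int num = 8;             /* hypothetical objects per slab */
            int free_slabs = 5;      /* hypothetical free slabs on the node */
            int free_objects = free_slabs * num;

            assert(free_objects >= free_slabs); /* the tofree bound always suffices */
            return 0;
    }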
@@ -2286,11 +2297,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
        check_irq_on();
        for_each_online_node(i) {
                l3 = cachep->nodelists[i];
-               if (l3) {
-                       spin_lock_irq(&l3->list_lock);
-                       ret += __node_shrink(cachep, i);
-                       spin_unlock_irq(&l3->list_lock);
-               }
+               if (!l3)
+                       continue;
+
+               drain_freelist(cachep, l3, l3->free_objects);
+
+               ret += !list_empty(&l3->slabs_full) ||
+                       !list_empty(&l3->slabs_partial);
        }
        return (ret ? 1 : 0);
 }
@@ -3694,10 +3707,6 @@ static void cache_reap(void *unused)
        }
 
        list_for_each_entry(searchp, &cache_chain, next) {
-               struct list_head *p;
-               int tofree;
-               struct slab *slabp;
-
                check_irq_on();
 
                /*
@@ -3722,41 +3731,15 @@ static void cache_reap(void *unused)
 
                drain_array(searchp, l3, l3->shared, 0, node);
 
-               if (l3->free_touched) {
+               if (l3->free_touched)
                        l3->free_touched = 0;
-                       goto next;
-               }
-
-               tofree = (l3->free_limit + 5 * searchp->num - 1) /
-                       (5 * searchp->num);
-               do {
-                       /*
-                        * Do not lock if there are no free blocks.
-                        */
-                       if (list_empty(&l3->slabs_free))
-                               break;
-
-                       spin_lock_irq(&l3->list_lock);
-                       p = l3->slabs_free.next;
-                       if (p == &(l3->slabs_free)) {
-                               spin_unlock_irq(&l3->list_lock);
-                               break;
-                       }
-
-                       slabp = list_entry(p, struct slab, list);
-                       BUG_ON(slabp->inuse);
-                       list_del(&slabp->list);
-                       STATS_INC_REAPED(searchp);
-
-                       /*
-                        * Safe to drop the lock. The slab is no longer linked
-                        * to the cache. searchp cannot disappear, we hold
-                        * cache_chain_lock
-                        */
-                       l3->free_objects -= searchp->num;
-                       spin_unlock_irq(&l3->list_lock);
-                       slab_destroy(searchp, slabp);
-               } while (--tofree > 0);
+               else {
+                       int freed;
+
+                       freed = drain_freelist(searchp, l3, (l3->free_limit +
+                               5 * searchp->num - 1) / (5 * searchp->num));
+                       STATS_ADD_REAPED(searchp, freed);
+               }
 next:
                cond_resched();
        }
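
The target passed to drain_freelist() in cache_reap() is the same formula the
old inline loop used: a ceiling division, i.e. roughly one fifth of the
node's free_limit expressed in slabs per reap pass. A minimal sketch of the
arithmetic with hypothetical numbers (free_limit = 120 objects, num = 8
objects per slab):

    #include <stdio.h>

    int main(void)
    {
            int free_limit = 120;   /* example per-node free object limit */
            int num = 8;            /* example objects per slab */
            int tofree = (free_limit + 5 * num - 1) / (5 * num);

            printf("tofree = %d slabs per pass\n", tofree); /* -> 3 */
            return 0;
    }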