Commit 833b706c authored by Joonsoo Kim, committed by Linus Torvalds

slab: destroy a slab without holding any alien cache lock

I haven't heard that the alien cache lock is actually contended, but reducing the chance of contention is better in general. This change also lets us simplify the complex lockdep annotation in the slab code; that simplification is done in the following patch.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49dfc304
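The core idea is visible in the diff below: __drain_alien_cache() now only moves the victim objects onto a caller-provided list while the alien cache lock is held, and the callers invoke slabs_destroy() only after dropping that lock. The following stand-alone C sketch (hypothetical user-space names, a pthread mutex standing in for the kernel spinlock) illustrates the same "collect under the lock, destroy after unlock" pattern; it is an illustration of the idea, not kernel code.

/*
 * Minimal user-space sketch of the pattern the patch applies (hypothetical
 * names): while holding the cache lock, only *detach* the objects to be
 * destroyed onto a local list; do the actual destruction after the lock
 * has been dropped, so no extra work (or other locks) happens inside the
 * critical section.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int id;
	struct obj *next;
};

struct cache {
	pthread_mutex_t lock;
	struct obj *head;	/* objects cached under the lock */
};

/* Called with cache->lock held: detach everything onto *list, free nothing. */
static void drain_cache_locked(struct cache *c, struct obj **list)
{
	*list = c->head;
	c->head = NULL;
}

/* Called after cache->lock is dropped: destroy the detached objects. */
static void destroy_list(struct obj *list)
{
	while (list) {
		struct obj *next = list->next;
		printf("destroying obj %d outside the lock\n", list->id);
		free(list);
		list = next;
	}
}

static void drain_cache(struct cache *c)
{
	struct obj *list = NULL;

	pthread_mutex_lock(&c->lock);
	drain_cache_locked(c, &list);	/* cheap: pure list manipulation */
	pthread_mutex_unlock(&c->lock);

	destroy_list(list);		/* heavier work, lock no longer held */
}

int main(void)
{
	struct cache c = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };

	for (int i = 0; i < 3; i++) {
		struct obj *o = malloc(sizeof(*o));
		o->id = i;
		o->next = c.head;
		c.head = o;
	}
	drain_cache(&c);
	return 0;
}

Because the destruction work no longer runs under the alien cache lock, the lock nesting is flatter, which is what the commit message refers to when it says the lockdep annotation can be simplified in the following patch.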
@@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache *ac, int node)
+				struct array_cache *ac, int node,
+				struct list_head *list)
 {
 	struct kmem_cache_node *n = get_node(cachep, node);
-	LIST_HEAD(list);
 
 	if (ac->avail) {
 		spin_lock(&n->list_lock);
@@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		if (n->shared)
 			transfer_objects(n->shared, ac, ac->limit);
 
-		free_block(cachep, ac->entry, ac->avail, node, &list);
+		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
 		spin_unlock(&n->list_lock);
-		slabs_destroy(cachep, &list);
 	}
 }
 
@@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 		if (alc) {
 			ac = &alc->ac;
 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
-				__drain_alien_cache(cachep, ac, node);
+				LIST_HEAD(list);
+
+				__drain_alien_cache(cachep, ac, node, &list);
 				spin_unlock_irq(&alc->lock);
+				slabs_destroy(cachep, &list);
 			}
 		}
 	}
@@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	for_each_online_node(i) {
 		alc = alien[i];
 		if (alc) {
+			LIST_HEAD(list);
+
 			ac = &alc->ac;
 			spin_lock_irqsave(&alc->lock, flags);
-			__drain_alien_cache(cachep, ac, i);
+			__drain_alien_cache(cachep, ac, i, &list);
 			spin_unlock_irqrestore(&alc->lock, flags);
+			slabs_destroy(cachep, &list);
 		}
 	}
 }
@@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
+		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);