Commit f9e13c0a authored by Shakeel Butt, committed by Linus Torvalds

slab, slub: skip unnecessary kasan_cache_shutdown()

The kasan quarantine is designed to delay freeing slab objects to catch
use-after-free.  The quarantine can be large (several percent of machine
memory size).  When kmem_caches are deleted, the related objects are flushed
from the quarantine, but this requires scanning the entire quarantine,
which can be very slow.  We have seen the kernel busily working on this
while holding slab_mutex and badly affecting cache_reaper, slabinfo
readers and memcg kmem cache creations.
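
To see why the flush is expensive, here is a minimal userspace model of the
scan (this is not the actual mm/kasan/quarantine.c code; the structure and
function names below are invented for illustration).  Every entry in the
quarantine has to be visited just to find the few objects that belong to the
dying cache, so each cache destruction costs time proportional to the total
quarantine size:

	#include <stdlib.h>

	/* Hypothetical model of one quarantined (delayed-free) allocation. */
	struct quarantined_object {
		struct quarantined_object *next;
		const void *cache;	/* which cache the object came from */
	};

	/* Global quarantine list; in practice it can hold millions of entries. */
	static struct quarantined_object *quarantine_head;

	/*
	 * Model of flushing a dying cache: the whole list is walked even when
	 * none (or almost none) of the entries belong to this cache, so the
	 * cost is O(total quarantine size) per cache destruction.
	 */
	static void model_quarantine_remove_cache(const void *cache)
	{
		struct quarantined_object **pp = &quarantine_head;

		while (*pp) {
			struct quarantined_object *obj = *pp;

			if (obj->cache == cache) {
				*pp = obj->next;	/* unlink from the quarantine */
				free(obj);		/* perform the delayed free */
			} else {
				pp = &obj->next;	/* keep it quarantined */
			}
		}
	}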

It can be easily reproduced with the following script:

	yes . | head -1000000 | xargs stat > /dev/null
	for i in `seq 1 10`; do
		seq 500 | (cd /cg/memory && xargs mkdir)
		seq 500 | xargs -I{} sh -c 'echo $BASHPID > \
			/cg/memory/{}/tasks && exec stat .' > /dev/null
		seq 500 | (cd /cg/memory && xargs rmdir)
	done

The busy stack:
    kasan_cache_shutdown
    shutdown_cache
    memcg_destroy_kmem_caches
    mem_cgroup_css_free
    css_free_rwork_fn
    process_one_work
    worker_thread
    kthread
    ret_from_fork

This patch is based on the observation that if the kmem_cache to be
destroyed is empty then there should not be any objects of this cache in
the quarantine.
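
Concretely, this is the combined effect of the diff below (a condensed view
using the SLUB variant of the new helper; the helper is added to both the slab
and slub allocators):

	/* New helper (SLUB version): the cache is empty iff no node has slabs. */
	bool __kmem_cache_empty(struct kmem_cache *s)
	{
		int node;
		struct kmem_cache_node *n;

		for_each_kmem_cache_node(s, node, n)
			if (n->nr_partial || slabs_node(s, node))
				return false;
		return true;
	}

	/* kasan_cache_shutdown() now skips the quarantine scan for empty caches. */
	void kasan_cache_shutdown(struct kmem_cache *cache)
	{
		if (!__kmem_cache_empty(cache))
			quarantine_remove_cache(cache);
	}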

Without the patch the script got stuck for a couple of hours.  With the
patch the script completed within a second.

Link: http://lkml.kernel.org/r/20180327230603.54721-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1ba586de
mm/kasan/kasan.c
@@ -382,7 +382,8 @@ void kasan_cache_shrink(struct kmem_cache *cache)
 
 void kasan_cache_shutdown(struct kmem_cache *cache)
 {
-	quarantine_remove_cache(cache);
+	if (!__kmem_cache_empty(cache))
+		quarantine_remove_cache(cache);
 }
 
 size_t kasan_metadata_size(struct kmem_cache *cache)
mm/slab.c
@@ -2291,6 +2291,18 @@ static int drain_freelist(struct kmem_cache *cache,
 	return nr_freed;
 }
 
+bool __kmem_cache_empty(struct kmem_cache *s)
+{
+	int node;
+	struct kmem_cache_node *n;
+
+	for_each_kmem_cache_node(s, node, n)
+		if (!list_empty(&n->slabs_full) ||
+		    !list_empty(&n->slabs_partial))
+			return false;
+	return true;
+}
+
 int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
mm/slab.h
@@ -166,6 +166,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 			      SLAB_TEMPORARY | \
 			      SLAB_ACCOUNT)
 
+bool __kmem_cache_empty(struct kmem_cache *);
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *);
mm/slub.c
@@ -3696,6 +3696,17 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 		discard_slab(s, page);
 }
 
+bool __kmem_cache_empty(struct kmem_cache *s)
+{
+	int node;
+	struct kmem_cache_node *n;
+
+	for_each_kmem_cache_node(s, node, n)
+		if (n->nr_partial || slabs_node(s, node))
+			return false;
+	return true;
+}
+
 /*
  * Release all resources used by a slab cache.
 */