Commit f77d0cda authored by Vlastimil Babka

mm, slab: move kfence_shutdown_cache() outside slab_mutex

kfence_shutdown_cache() is called under slab_mutex when the cache is
destroyed synchronously, and outside slab_mutex during the delayed
destruction of SLAB_TYPESAFE_BY_RCU caches.

It seems it should always be safe to call it outside of slab_mutex so we
can just move the call to kmem_cache_release(), which is called outside.
Reviewed-by: Jann Horn <jannh@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 4ec10268
@@ -492,6 +492,7 @@ EXPORT_SYMBOL(kmem_buckets_create);
  */
 static void kmem_cache_release(struct kmem_cache *s)
 {
+	kfence_shutdown_cache(s);
 	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
 		sysfs_slab_release(s);
 	else
@@ -521,10 +522,8 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	rcu_barrier();
-	list_for_each_entry_safe(s, s2, &to_destroy, list) {
-		kfence_shutdown_cache(s);
+	list_for_each_entry_safe(s, s2, &to_destroy, list)
 		kmem_cache_release(s);
-	}
 }
void slab_kmem_cache_release(struct kmem_cache *s) void slab_kmem_cache_release(struct kmem_cache *s)
@@ -563,9 +562,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	list_del(&s->list);
-	if (!err && !rcu_set)
-		kfence_shutdown_cache(s);
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.