Commit d330076e authored by Linus Torvalds

Merge tag 'slab-for-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fix from Vlastimil Babka:

 - A fix from Waiman Long to avoid a theoretical deadlock reported by
   lockdep.

* tag 'slab-for-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab_common: Deleting kobject in kmem_cache_destroy() without holding slab_mutex/cpu_hotplug_lock
parents 2880e1a1 0495e337
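
For context, the API touched by this fix is the kmem_cache lifecycle: a cache is created once with kmem_cache_create(), objects are allocated from it with kmem_cache_alloc() and returned with kmem_cache_free(), and the cache is torn down with kmem_cache_destroy(), which is the path changed below. The following minimal module is only an illustrative sketch (the "foo" names are hypothetical), showing the single create/destroy pairing that the new comment in the diff insists on:

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int a;
	int b;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* With sysfs support, the cache gets a kobject under /sys/kernel/slab/. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* Called exactly once, after all objects have been freed back. */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");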
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -392,6 +392,28 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+#ifdef SLAB_SUPPORTS_SYSFS
+/*
+ * For a given kmem_cache, kmem_cache_destroy() should only be called
+ * once or there will be a use-after-free problem. The actual deletion
+ * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
+ * protection. So they are now done without holding those locks.
+ *
+ * Note that there will be a slight delay in the deletion of sysfs files
+ * if kmem_cache_release() is called indirectly from a work function.
+ */
+static void kmem_cache_release(struct kmem_cache *s)
+{
+	sysfs_slab_unlink(s);
+	sysfs_slab_release(s);
+}
+#else
+static void kmem_cache_release(struct kmem_cache *s)
+{
+	slab_kmem_cache_release(s);
+}
+#endif
+
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 {
 	LIST_HEAD(to_destroy);
@@ -418,11 +440,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
 		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_release(s);
-#else
-		slab_kmem_cache_release(s);
-#endif
+		kmem_cache_release(s);
 	}
 }
@@ -437,20 +455,11 @@ static int shutdown_cache(struct kmem_cache *s)
 	list_del(&s->list);
 
 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_unlink(s);
-#endif
 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
 		kfence_shutdown_cache(s);
 		debugfs_slab_release(s);
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_unlink(s);
-		sysfs_slab_release(s);
-#else
-		slab_kmem_cache_release(s);
-#endif
 	}
 
 	return 0;
@@ -465,14 +474,16 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+	int refcnt;
+
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
 
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
-	s->refcount--;
-	if (s->refcount)
+	refcnt = --s->refcount;
+	if (refcnt)
 		goto out_unlock;
 
 	WARN(shutdown_cache(s),
@@ -481,6 +492,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
+	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
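
Putting the hunks together: kmem_cache_destroy() now takes cpu_hotplug_lock and slab_mutex only for the refcount and list manipulation, records in a local variable whether this call dropped the last reference, and performs the sysfs kobject removal only after both locks have been released (for SLAB_TYPESAFE_BY_RCU caches the release still happens later, from the RCU destroy work function). Removing a kobject's sysfs files can block waiting for in-flight readers of those files, and doing that wait while holding slab_mutex and cpu_hotplug_lock is what produced the lock cycle lockdep warned about. The sketch below is just the post-patch function assembled from the hunks above; the WARN message text elided by the diff context is not reproduced:

void kmem_cache_destroy(struct kmem_cache *s)
{
	int refcnt;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	refcnt = --s->refcount;		/* decide while slab_mutex is still held */
	if (refcnt)
		goto out_unlock;	/* cache is still shared (aliased); keep it */

	WARN(shutdown_cache(s),
	     "...");			/* message elided, as in the diff context */
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	/* kobject/sysfs teardown now runs with no locks held */
	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
		kmem_cache_release(s);
}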