Commit 1c84724c authored by Linus Torvalds

Merge tag 'slab-fixes-for-6.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:

 - stable fix to prevent list corruption when destroying caches with
   leftover objects (Rafael Aquini)

 - fix for a gotcha in kmalloc_size_roundup() when it is called with a
   too-high size, discovered recently when a networking call site had to
   be fixed for a different issue (David Laight)

* tag 'slab-fixes-for-6.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slab: kmalloc_size_roundup() must not return 0 for non-zero size
  mm/slab_common: fix slab_caches list corruption after kmem_cache_destroy()
parents 6edc84bc 8446a4de
@@ -479,7 +479,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	int refcnt;
+	int err = -EBUSY;
 	bool rcu_set;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
@@ -490,17 +490,17 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
 
-	refcnt = --s->refcount;
-	if (refcnt)
+	s->refcount--;
+	if (s->refcount)
 		goto out_unlock;
 
-	WARN(shutdown_cache(s),
-	     "%s %s: Slab cache still has objects when called from %pS",
+	err = shutdown_cache(s);
+	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
 	     __func__, s->name, (void *)_RET_IP_);
 
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
-	if (!refcnt && !rcu_set)
+	if (!err && !rcu_set)
 		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
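
The failure mode fixed here is easiest to see in isolation: with the old code, when shutdown_cache() failed because objects were still allocated, refcnt had already dropped to zero, so kmem_cache_release() freed the struct kmem_cache even though the failure path of shutdown_cache() had left it linked on slab_caches. Below is a minimal user-space sketch of the two control flows; every kernel call is stubbed out and all names (shutdown_cache_stub, destroy_old, destroy_new) are hypothetical.

/*
 * Minimal user-space sketch of the control flow changed above. All
 * kernel calls are stubbed; 'still_has_objects' simulates destroying
 * a cache while objects remain allocated. Names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define EBUSY 16

static bool still_has_objects = true;

/* Stand-in for shutdown_cache(): fails if objects are still live. */
static int shutdown_cache_stub(void)
{
	return still_has_objects ? -EBUSY : 0;
}

/* Old flow: the release decision looks only at the refcount. */
static void destroy_old(void)
{
	int refcnt = 0;				/* refcount dropped to zero */

	if (shutdown_cache_stub())
		printf("WARN: cache still has objects\n");
	if (!refcnt)				/* ignores the failure! */
		printf("old: struct freed while still on slab_caches\n");
}

/* New flow: release only after a successful shutdown. */
static void destroy_new(void)
{
	int err = shutdown_cache_stub();

	if (err)
		printf("WARN: cache still has objects\n");
	if (!err)
		printf("new: struct freed\n");
	else
		printf("new: struct kept; list stays consistent\n");
}

int main(void)
{
	destroy_old();
	destroy_new();
	return 0;
}

Keying the release on err rather than the refcount means a cache that fails to shut down stays alive, matching the slab_caches list it is still on.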
@@ -745,24 +745,24 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
 
 size_t kmalloc_size_roundup(size_t size)
 {
-	struct kmem_cache *c;
+	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
+		/*
+		 * The flags don't matter since size_index is common to all.
+		 * Neither does the caller for just getting ->object_size.
+		 */
+		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+	}
 
-	/* Short-circuit the 0 size case. */
-	if (unlikely(size == 0))
-		return 0;
-	/* Short-circuit saturated "too-large" case. */
-	if (unlikely(size == SIZE_MAX))
-		return SIZE_MAX;
 	/* Above the smaller buckets, size is a multiple of page size. */
-	if (size > KMALLOC_MAX_CACHE_SIZE)
+	if (size && size <= KMALLOC_MAX_SIZE)
 		return PAGE_SIZE << get_order(size);
 
 	/*
-	 * The flags don't matter since size_index is common to all.
-	 * Neither does the caller for just getting ->object_size.
+	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
+	 * and very large size - kmalloc() may fail.
	 */
-	c = kmalloc_slab(size, GFP_KERNEL, 0);
-	return c ? c->object_size : 0;
+	return size;
 }
 EXPORT_SYMBOL(kmalloc_size_roundup);
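
Why the non-zero contract matters: the typical caller pattern is roughly p = kmalloc(kmalloc_size_roundup(n), GFP_KERNEL), and kmalloc(0) returns ZERO_SIZE_PTR, which is non-NULL, so a 0 return here would slip past the caller's NULL check with no usable memory behind it. With the old code, PAGE_SIZE << get_order(size) can shift past the top bit for sizes near SIZE_MAX and wrap to 0. A rough user-space model of old versus new behaviour follows; PAGE_SIZE and the KMALLOC_* limits are hard-coded stand-ins for the arch- and config-dependent real values, and the small-size cache lookup is elided.

/*
 * User-space model of the overflow closed by the fix above. The
 * constants and get_order() are simplified stand-ins; treat all of
 * it as illustrative assumptions, not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		((size_t)1 << PAGE_SHIFT)
#define KMALLOC_MAX_CACHE_SIZE	((size_t)1 << 13)	/* stand-in: 8 KiB */
#define KMALLOC_MAX_SIZE	((size_t)1 << 25)	/* stand-in: 32 MiB */

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

/* Old behaviour: no upper bound, so the shift can wrap to 0. */
static size_t roundup_old(size_t size)
{
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return PAGE_SIZE << get_order(size);	/* wraps for huge size */
	return size;	/* small-bucket cache lookup elided in this model */
}

/* New behaviour: out-of-range sizes are passed through unchanged. */
static size_t roundup_new(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE)
		return size;	/* small-bucket cache lookup elided */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);
	/* 0 or too large: return 'size' and let kmalloc() handle it */
	return size;
}

int main(void)
{
	size_t huge = SIZE_MAX - 100;

	printf("old: %zu -> %zu\n", huge, roundup_old(huge));	/* -> 0 */
	printf("new: %zu -> %zu\n", huge, roundup_new(huge));	/* -> size */
	return 0;
}

Passing an oversized 'size' straight through means the subsequent kmalloc() fails and returns NULL, so the caller's existing error handling fires as intended.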