Commit f6785e0c authored by Linus Torvalds

Merge tag 'slab-for-6.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:
 "Fixes for issues introduced in this merge window: kobject memory leak,
  unsuppressed warning and possible lockup in new slub_kunit tests,
  misleading code in kvfree_rcu_queue_batch()"

* tag 'slab-for-6.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slub/kunit: skip test_kfree_rcu when the slub kunit test is built-in
  mm, slab: suppress warnings in test_leak_destroy kunit test
  rcu/kvfree: Refactor kvfree_rcu_queue_batch()
  mm, slab: fix use of SLAB_SUPPORTS_SYSFS in kmem_cache_release()
parents e1043b67 cac39b07
@@ -3607,11 +3607,12 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
 			}
 
 			// One work is per one batch, so there are three
-			// "free channels", the batch can handle. It can
-			// be that the work is in the pending state when
-			// channels have been detached following by each
-			// other.
+			// "free channels", the batch can handle. Break
+			// the loop since it is done with this CPU thus
+			// queuing an RCU work is _always_ success here.
 			queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
+			WARN_ON_ONCE(!queued);
+			break;
 		}
 	}
...
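The new comment carries the reasoning behind the added WARN_ON_ONCE(): work is only attached to an idle batch, and the loop hands off at most one batch per call before breaking, so the work item cannot already be pending when it is queued. Below is a minimal userspace model of that control flow; every name in it is an illustrative stand-in (queue_work_stub() mimics queue_rcu_work() failing only when the work is already queued), not the kernel API.

/* Userspace sketch of the queue-one-batch-then-break pattern above.
 * Illustrative stand-ins only; none of this is the kernel API. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define N_BATCHES 3	/* one work item per batch of "free channels" */

struct batch {
	bool has_work;	/* objects were detached into this batch */
	bool pending;	/* its work item is already queued */
};

/* Mimics queue_rcu_work(): fails only if the work is already queued. */
static bool queue_work_stub(struct batch *b)
{
	if (b->pending)
		return false;
	b->pending = true;
	return true;
}

/* Work is only attached to an idle batch, so has_work implies !pending. */
static void attach_work(struct batch *b)
{
	if (!b->pending)
		b->has_work = true;
}

static void queue_one_batch(struct batch batches[N_BATCHES])
{
	for (int i = 0; i < N_BATCHES; i++) {
		struct batch *b = &batches[i];

		if (!b->has_work)
			continue;

		/* Because of the invariant above, queuing cannot fail here,
		 * and we stop after handing off the first eligible batch. */
		bool queued = queue_work_stub(b);

		assert(queued);	/* stands in for WARN_ON_ONCE(!queued) */
		break;
	}
}

int main(void)
{
	struct batch batches[N_BATCHES] = { 0 };

	attach_work(&batches[1]);
	queue_one_batch(batches);
	printf("batch 1 pending: %d\n", batches[1].pending);
	return 0;
}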
@@ -164,10 +164,16 @@ struct test_kfree_rcu_struct {
 static void test_kfree_rcu(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+	struct kmem_cache *s;
+	struct test_kfree_rcu_struct *p;
+
+	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+		kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+	s = test_kmem_cache_create("TestSlub_kfree_rcu",
 				      sizeof(struct test_kfree_rcu_struct),
 				      SLAB_NO_MERGE);
-	struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
+	p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kfree_rcu(p, rcu);
 	kmem_cache_destroy(s);
@@ -177,13 +183,13 @@ static void test_kfree_rcu(struct kunit *test)
 static void test_leak_destroy(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
 						64, SLAB_NO_MERGE);
 	kmem_cache_alloc(s, GFP_KERNEL);
 
 	kmem_cache_destroy(s);
 
-	KUNIT_EXPECT_EQ(test, 1, slab_errors);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
 }
 
 static int test_init(struct kunit *test)
...
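For readers who don't work with KUnit: the skip above happens at run time, not via Kconfig. kunit_skip() records a skip reason and aborts the current test case, so nothing after it executes when the test is built-in. A minimal, hedged sketch of how such a conditional skip sits inside a registered suite follows; the suite, case and CONFIG_EXAMPLE_KUNIT_TEST names are made up, while kunit_skip(), IS_BUILTIN(), KUNIT_CASE() and kunit_test_suites() are the real interfaces.

// Sketch of a KUnit case that skips itself under a build-time condition.
// Names containing "example" are hypothetical.
#include <kunit/test.h>
#include <linux/kconfig.h>
#include <linux/module.h>

static void example_skip_when_builtin(struct kunit *test)
{
	/* kunit_skip() marks the case as skipped and aborts it, so the
	 * assertion below only runs when built as a module. */
	if (IS_BUILTIN(CONFIG_EXAMPLE_KUNIT_TEST))
		kunit_skip(test, "needs to run as a module");

	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_skip_when_builtin),
	{}
};

static struct kunit_suite example_suite = {
	.name = "example_skip",
	.test_cases = example_cases,
};
kunit_test_suites(&example_suite);

MODULE_LICENSE("GPL");

In the hunk itself the skip guards kfree_rcu() followed by kmem_cache_destroy(); the pull message above attributes a possible lockup to running this built-in, which is why the guard is a run-time skip rather than a Kconfig dependency.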
@@ -310,7 +310,7 @@ struct kmem_cache {
 };
 
 #if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
+#define SLAB_SUPPORTS_SYSFS 1
 void sysfs_slab_unlink(struct kmem_cache *s);
 void sysfs_slab_release(struct kmem_cache *s);
 #else
@@ -546,6 +546,12 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 	return false;
 }
 
+#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
+bool slab_in_kunit_test(void);
+#else
+static inline bool slab_in_kunit_test(void) { return false; }
+#endif
+
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 /*
...
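The one-character change to SLAB_SUPPORTS_SYSFS is the kobject-leak fix named in the pull message. The likely mechanism (the consuming code in kmem_cache_release() is not part of this hunk, so this is an inference): the IS_ENABLED()/__is_defined() helpers in include/linux/kconfig.h only treat a macro as "on" when it expands literally to 1, so an empty #define evaluates as 0 in such a check and the sysfs release path never runs. The userspace sketch below reproduces just that preprocessor trick; EMPTY_DEFINE and DEFINED_TO_ONE are made-up names standing in for the two forms of the macro.

/* Userspace reproduction of the __is_defined() trick from the kernel's
 * include/linux/kconfig.h: only a macro defined to 1 yields 1. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define EMPTY_DEFINE		/* like the old "#define SLAB_SUPPORTS_SYSFS" */
#define DEFINED_TO_ONE 1	/* like the fixed "#define SLAB_SUPPORTS_SYSFS 1" */

int main(void)
{
	/* Defined but empty: token pasting produces junk, second arg is 0. */
	printf("empty define -> %d\n", __is_defined(EMPTY_DEFINE));	/* prints 0 */
	/* Defined to 1: __ARG_PLACEHOLDER_1 expands to "0," and shifts
	 * the 1 into the value slot. */
	printf("defined to 1 -> %d\n", __is_defined(DEFINED_TO_ONE));	/* prints 1 */
	return 0;
}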
@@ -508,6 +508,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	kasan_cache_shutdown(s);
 
 	err = __kmem_cache_shutdown(s);
+	if (!slab_in_kunit_test())
 	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
 			__func__, s->name, (void *)_RET_IP_);
...
@@ -827,7 +827,7 @@ static bool slab_add_kunit_errors(void)
 	return true;
 }
 
-static bool slab_in_kunit_test(void)
+bool slab_in_kunit_test(void)
 {
 	struct kunit_resource *resource;
 
@@ -843,7 +843,6 @@ static bool slab_in_kunit_test(void)
 }
 #else
 static inline bool slab_add_kunit_errors(void) { return false; }
-static inline bool slab_in_kunit_test(void) { return false; }
 #endif
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -5436,6 +5435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 
 	for_each_object(p, s, addr, slab->objects) {
 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
+			if (slab_add_kunit_errors())
+				continue;
 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
 			print_tracking(s, p);
 		}
...
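The mm/slub.c side shows slab_in_kunit_test() losing its static qualifier (it is now declared in mm/slab.h and called from kmem_cache_destroy()), and list_slab_objects() routing per-object reports through slab_add_kunit_errors(), which is consistent with test_leak_destroy above now expecting slab_errors to reach 2. The way such helpers detect "are we inside the SLUB KUnit test?" is KUnit's named-resource lookup; the body of slab_in_kunit_test() is not shown in this hunk, so the sketch below is a generic, hedged example of that pattern with a made-up resource name (kunit_get_current_test(), kunit_find_named_resource() and kunit_put_resource() are real KUnit APIs).

// Hedged sketch of probing for a KUnit named resource from non-test code.
// "example_errors" and example_in_kunit_test() are hypothetical names.
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <kunit/resource.h>

static bool example_in_kunit_test(void)
{
	struct kunit *test = kunit_get_current_test();
	struct kunit_resource *resource;

	/* Outside a KUnit run (the common case) this is NULL, so production
	 * code paths keep their normal behaviour. */
	if (!test)
		return false;

	/* Only treat it as "our" test if it registered the named resource. */
	resource = kunit_find_named_resource(test, "example_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}

In the kernel, a caller of such a helper would sit behind a guard like the CONFIG_SLUB_DEBUG && CONFIG_KUNIT block added to mm/slab.h above, so the check compiles away entirely when KUnit is not configured.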