Commit 3f1dd33f authored by Vlastimil Babka

mm, slab: suppress warnings in test_leak_destroy kunit test

The test_leak_destroy kunit test intends to test the detection of stray
objects in kmem_cache_destroy(), which normally produces a warning. The
other slab kunit tests suppress the warnings in the kunit test context,
so suppress warnings and related printk output in this test as well.
Automated test running environments then don't need to learn to filter
the warnings.
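
For context, the suppression relies on the slab code being able to tell that it is
running inside a slub kunit test. A minimal sketch of such a check, assuming the
"slab_errors" named resource that the existing slub kunit tests register; this is an
illustration of the mechanism, not necessarily the exact upstream body of
slab_in_kunit_test():

bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	/* Not running under kunit at all. */
	if (!kunit_get_current_test())
		return false;

	/* Only tests that registered the "slab_errors" counter opt in. */
	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}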

Also rename the test's kmem_cache, the name was wrongly copy-pasted from
test_kfree_rcu.

Fixes: 4e1c44b3 ("kunit, slub: add test_kfree_rcu() and test_leak_destroy()")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202408251723.42f3d902-oliver.sang@intel.com
Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Closes: https://lore.kernel.org/all/CAB=+i9RHHbfSkmUuLshXGY_ifEZg9vCZi3fqr99+kmmnpDus7Q@mail.gmail.com/
Reported-by: Guenter Roeck <linux@roeck-us.net>
Closes: https://lore.kernel.org/all/6fcb1252-7990-4f0d-8027-5e83f0fb9409@roeck-us.net/
Tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 3c5d61ae
@@ -177,13 +177,13 @@ static void test_kfree_rcu(struct kunit *test)
 static void test_leak_destroy(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
 							64, SLAB_NO_MERGE);
 	kmem_cache_alloc(s, GFP_KERNEL);
 	kmem_cache_destroy(s);
-	KUNIT_EXPECT_EQ(test, 1, slab_errors);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
 }
 
 static int test_init(struct kunit *test)
......
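
The slab_errors counter compared by KUNIT_EXPECT_EQ() above lives in the test module and
is exposed to the slab code as a named kunit resource. A sketch of that wiring in
test_init(), assuming a file-scope counter and resource pair, following the pattern the
existing slub kunit tests use (illustrative, not necessarily the exact upstream code):

static struct kunit_resource resource;
static int slab_errors;

static int test_init(struct kunit *test)
{
	slab_errors = 0;

	/* Publish the counter so the slab code can find and bump it by name. */
	kunit_add_named_resource(test, NULL, NULL, &resource,
				 "slab_errors", &slab_errors);
	return 0;
}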
@@ -546,6 +546,12 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 	return false;
 }
 
+#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
+bool slab_in_kunit_test(void);
+#else
+static inline bool slab_in_kunit_test(void) { return false; }
+#endif
+
 #ifdef CONFIG_SLAB_OBJ_EXT
 /*
......
@@ -508,8 +508,9 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	kasan_cache_shutdown(s);
 	err = __kmem_cache_shutdown(s);
-	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
-	     __func__, s->name, (void *)_RET_IP_);
+	if (!slab_in_kunit_test())
+		WARN(err, "%s %s: Slab cache still has objects when called from %pS",
+		     __func__, s->name, (void *)_RET_IP_);
 
 	list_del(&s->list);
......
@@ -827,7 +827,7 @@ static bool slab_add_kunit_errors(void)
 	return true;
 }
 
-static bool slab_in_kunit_test(void)
+bool slab_in_kunit_test(void)
 {
 	struct kunit_resource *resource;
 
@@ -843,7 +843,6 @@ static bool slab_in_kunit_test(void)
 }
 #else
 static inline bool slab_add_kunit_errors(void) { return false; }
-static inline bool slab_in_kunit_test(void) { return false; }
 #endif
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -5436,6 +5435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 	for_each_object(p, s, addr, slab->objects) {
 
 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
+			if (slab_add_kunit_errors())
+				continue;
 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
 			print_tracking(s, p);
 		}
......
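
The slab_add_kunit_errors() call added above is what turns the per-object printout into a
counted, silent error, which is presumably why test_leak_destroy() now expects two errors:
one from the existing shutdown-time report and one from the per-object path added here. A
sketch of that helper, the counting counterpart of the slab_in_kunit_test() sketch earlier
(illustrative, not necessarily the exact upstream body):

static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	/* Record the error for the test instead of printing it. */
	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}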