Commit b2c5bd4c authored by Andrey Konovalov's avatar Andrey Konovalov Committed by Andrew Morton

kasan: migrate workqueue_uaf test to kunit

Migrate the workqueue_uaf test to the KUnit framework.

Initially, this test was intended to check that Generic KASAN prints
auxiliary stack traces for workqueues.  Nevertheless, the test is enabled
for all modes to make sure that KASAN reports bad accesses in the tested
scenario.

The presence of auxiliary stack traces for the Generic mode needs to be
inspected manually.

Link: https://lkml.kernel.org/r/1d81b6cc2a58985126283d1e0de8e663716dd930.1664298455.git.andreyknvl@google.com
Signed-off-by: default avatarAndrey Konovalov <andreyknvl@google.com>
Reviewed-by: default avatarMarco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 8516e837
......@@ -1141,6 +1141,14 @@ static void kmalloc_double_kzfree(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
/*
* The two tests below check that Generic KASAN prints auxiliary stack traces
* for RCU callbacks and workqueues. The reports need to be inspected manually.
*
* These tests are still enabled for other KASAN modes to make sure that all
* modes report bad accesses in tested scenarios.
*/
static struct kasan_rcu_info {
int i;
struct rcu_head rcu;
......@@ -1155,13 +1163,6 @@ static void rcu_uaf_reclaim(struct rcu_head *rp)
((volatile struct kasan_rcu_info *)fp)->i;
}
/*
* Check that Generic KASAN prints auxiliary stack traces for RCU callbacks.
* The report needs to be inspected manually.
*
* This test is still enabled for other KASAN modes to make sure that all modes
* report bad accesses in tested scenarios.
*/
static void rcu_uaf(struct kunit *test)
{
struct kasan_rcu_info *ptr;
......@@ -1177,6 +1178,30 @@ static void rcu_uaf(struct kunit *test)
rcu_barrier());
}
/*
 * Work handler used by workqueue_uaf(): frees its own work_struct, so the
 * caller's later read of the item is a use-after-free.
 */
static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}
/*
 * Trigger a use-after-free on a work item: queue a work that frees itself,
 * destroy the workqueue (destroy_workqueue() flushes pending work, so the
 * item is freed by the time it returns), then read the freed item. KASAN is
 * expected to report the read; for Generic KASAN the report should also
 * carry an auxiliary stack trace (inspected manually, see comment above).
 *
 * NOTE(review): if the kmalloc() assertion fails, the test aborts without
 * destroying the workqueue — acceptable for a KUnit test, but worth noting.
 */
static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

	/* The volatile cast forces an actual load from the freed memory. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}
static void vmalloc_helpers_tags(struct kunit *test)
{
void *ptr;
......@@ -1509,6 +1534,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(rcu_uaf),
KUNIT_CASE(workqueue_uaf),
KUNIT_CASE(vmalloc_helpers_tags),
KUNIT_CASE(vmalloc_oob),
KUNIT_CASE(vmap_tags),
......
......@@ -62,35 +62,6 @@ static noinline void __init copy_user_test(void)
kfree(kmem);
}
/*
 * Work handler used by kasan_workqueue_uaf(): frees its own work_struct,
 * setting up the use-after-free that the caller then triggers.
 */
static noinline void __init kasan_workqueue_work(struct work_struct *work)
{
	kfree(work);
}
/*
 * Trigger a use-after-free on a work item: queue a work that frees itself,
 * destroy the workqueue (destroy_workqueue() flushes pending work, so the
 * item has been freed by the time it returns), then read the freed item.
 * KASAN is expected to report the read.
 */
static noinline void __init kasan_workqueue_uaf(void)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_wq_test");
	if (!workqueue) {
		pr_err("Allocation failed\n");
		return;
	}

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	if (!work) {
		pr_err("Allocation failed\n");
		/* Fix: don't leak the workqueue on the error path. */
		destroy_workqueue(workqueue);
		return;
	}

	INIT_WORK(work, kasan_workqueue_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

	pr_info("use-after-free on workqueue\n");
	/* The volatile cast forces an actual load from the freed memory. */
	((volatile struct work_struct *)work)->data;
}
static int __init test_kasan_module_init(void)
{
/*
......@@ -101,7 +72,6 @@ static int __init test_kasan_module_init(void)
bool multishot = kasan_save_enable_multi_shot();
copy_user_test();
kasan_workqueue_uaf();
kasan_restore_multi_shot(multishot);
return -EAGAIN;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment