Commit 9325b8b5 authored by Lorenzo Stoakes's avatar Lorenzo Stoakes Committed by Andrew Morton

tools: add skeleton code for userland testing of VMA logic

Establish a new userland VMA unit testing implementation under
tools/testing which utilises existing logic providing maple tree support
in userland utilising the now-shared code previously exclusive to radix
tree testing.

This provides fundamental VMA operations whose API is defined in mm/vma.h,
while stubbing out superfluous functionality.

This exists as a proof-of-concept, with the test implementation functional
and sufficient to allow userland compilation of vma.c, but containing only
cursory tests to demonstrate basic functionality.

Link: https://lkml.kernel.org/r/533ffa2eec771cbe6b387dd049a7f128a53eb616.1722251717.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Tested-by: SeongJae Park <sj@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Gow <davidgow@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Kees Cook <kees@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Rae Moar <rmoar@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Pengfei Xu <pengfei.xu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 74579d8d
@@ -24423,6 +24423,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: mm/vma.c
F: mm/vma.h
F: mm/vma_internal.h
F: tools/testing/vma/
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
......
# SPDX-License-Identifier: GPL-2.0-only
generated/bit-length.h
generated/map-shift.h
generated/autoconf.h
idr.c
radix-tree.c
vma
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Build the userland VMA test binary. Shared maple-tree/radix-tree test
# infrastructure comes from ../shared/shared.mk (SHARED_OFILES, CFLAGS etc.).
#
# NOTE: recipe lines below MUST be indented with a hard tab -- the web paste
# this was recovered from had stripped them, which breaks make entirely.
.PHONY: default

default: vma

include ../shared/shared.mk

OFILES = $(SHARED_OFILES) vma.o maple-shim.o
TARGETS = vma

# vma.c #includes ../../../mm/vma.c directly, so the kernel sources are
# listed as prerequisites to force a rebuild when they change.
vma: $(OFILES) vma_internal.h ../../../mm/vma.c ../../../mm/vma.h
	$(CC) $(CFLAGS) -o $@ $(OFILES) $(LDLIBS)

clean:
	$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h generated/bit-length.h generated/autoconf.h
/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * Minimal userland stand-in for the kernel's <linux/atomic.h>, providing
 * just enough for mm/vma.c to compile in the VMA test harness.
 *
 * NOTE(review): the uatomic_*() helpers presumably come from liburcu via
 * the shared test infrastructure -- confirm against ../shared headers.
 */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H

/* Plain 32-bit int; the single-threaded tests need no real atomicity. */
#define atomic_t int32_t
#define atomic_inc(x) uatomic_inc(x)
#define atomic_read(x) uatomic_read(x)
/*
 * atomic_set() is a deliberate no-op: both arguments are discarded, so the
 * store never happens. Code under test must not depend on the value written.
 */
#define atomic_set(x, y) do {} while (0)
#define U8_MAX UCHAR_MAX

#endif	/* _LINUX_ATOMIC_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * Minimal userland stand-in for the kernel's <linux/mmzone.h>: enough zone
 * and pgdat scaffolding for mm/vma.c to compile. The pgdat iteration
 * helpers are declared but presumably stubbed elsewhere in the harness --
 * confirm where first_online_pgdat()/next_online_pgdat() are defined.
 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#include <linux/atomic.h>

struct pglist_data *first_online_pgdat(void);
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);

/* Iterate every online NUMA node's pgdat; terminates when next returns NULL. */
#define for_each_online_pgdat(pgdat) \
	for (pgdat = first_online_pgdat(); \
	     pgdat; \
	     pgdat = next_online_pgdat(pgdat))

/* No real zones in userland: __MAX_NR_ZONES is 0, so node_zones is empty. */
enum zone_type {
	__MAX_NR_ZONES
};

#define MAX_NR_ZONES __MAX_NR_ZONES

#define MAX_PAGE_ORDER 10
#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)

/* Pageblock granularity pinned to the maximum order for simplicity. */
#define pageblock_order MAX_PAGE_ORDER
#define pageblock_nr_pages BIT(pageblock_order)
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)

struct zone {
	atomic_long_t managed_pages;
};

typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
} pg_data_t;

#endif	/* _LINUX_MMZONE_H */
// SPDX-License-Identifier: GPL-2.0-or-later
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "maple-shared.h"
#include "vma_internal.h"
/*
* Directly import the VMA implementation here. Our vma_internal.h wrapper
* provides userland-equivalent functionality for everything vma.c uses.
*/
#include "../../../mm/vma.c"
/* Shared dummy vm_ops instance; zero-initialized, used where a VMA needs
 * a non-NULL-capable ops pointer without any real callbacks. */
const struct vm_operations_struct vma_dummy_vm_ops;

/*
 * Test assertion helpers. On failure they print the location and the
 * stringized failing expression to stderr, then make the ENCLOSING function
 * return false -- so they are only usable inside bool-returning test
 * functions, never in main() or void helpers.
 */
#define ASSERT_TRUE(_expr) \
	do { \
		if (!(_expr)) { \
			fprintf(stderr, \
				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
				__FILE__, __LINE__, __FUNCTION__, #_expr); \
			return false; \
		} \
	} while (0)
#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
/*
 * Allocate a VMA on @mm covering [start, end) with the given page offset
 * and flags. Returns the new VMA, or NULL if vm_area_alloc() fails.
 * Caller owns the result and must release it with vm_area_free().
 */
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					pgoff_t pgoff,
					vm_flags_t flags)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (!vma)
		return NULL;

	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	/* Write flags directly; no locking needed in the userland harness. */
	vma->__vm_flags = flags;

	return vma;
}
static bool test_simple_merge(void)
{
struct vm_area_struct *vma;
unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
struct vm_area_struct *vma_middle = alloc_vma(&mm, 0x1000, 0x2000, 1, flags);
struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
ASSERT_FALSE(vma_link(&mm, vma_left));
ASSERT_FALSE(vma_link(&mm, vma_middle));
ASSERT_FALSE(vma_link(&mm, vma_right));
vma = vma_merge_new_vma(&vmi, vma_left, vma_middle, 0x1000,
0x2000, 1);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->vm_flags, flags);
vm_area_free(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
static bool test_simple_modify(void)
{
struct vm_area_struct *vma;
unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
ASSERT_FALSE(vma_link(&mm, init_vma));
/*
* The flags will not be changed, the vma_modify_flags() function
* performs the merge/split only.
*/
vma = vma_modify_flags(&vmi, init_vma, init_vma,
0x1000, 0x2000, VM_READ | VM_MAYREAD);
ASSERT_NE(vma, NULL);
/* We modify the provided VMA, and on split allocate new VMAs. */
ASSERT_EQ(vma, init_vma);
ASSERT_EQ(vma->vm_start, 0x1000);
ASSERT_EQ(vma->vm_end, 0x2000);
ASSERT_EQ(vma->vm_pgoff, 1);
/*
* Now walk through the three split VMAs and make sure they are as
* expected.
*/
vma_iter_set(&vmi, 0);
vma = vma_iter_load(&vmi);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x1000);
ASSERT_EQ(vma->vm_pgoff, 0);
vm_area_free(vma);
vma_iter_clear(&vmi);
vma = vma_next(&vmi);
ASSERT_EQ(vma->vm_start, 0x1000);
ASSERT_EQ(vma->vm_end, 0x2000);
ASSERT_EQ(vma->vm_pgoff, 1);
vm_area_free(vma);
vma_iter_clear(&vmi);
vma = vma_next(&vmi);
ASSERT_EQ(vma->vm_start, 0x2000);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 2);
vm_area_free(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
static bool test_simple_expand(void)
{
unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
VMA_ITERATOR(vmi, &mm, 0);
ASSERT_FALSE(vma_link(&mm, vma));
ASSERT_FALSE(vma_expand(&vmi, vma, 0, 0x3000, 0, NULL));
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
vm_area_free(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
static bool test_simple_shrink(void)
{
unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
VMA_ITERATOR(vmi, &mm, 0);
ASSERT_FALSE(vma_link(&mm, vma));
ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x1000);
ASSERT_EQ(vma->vm_pgoff, 0);
vm_area_free(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
/*
 * Test driver: run every test_*() function, tally failures, and report a
 * summary. Exit status is non-zero when any test failed.
 */
int main(void)
{
	int total = 0, failures = 0;

	/* Shared maple tree test infrastructure must be set up first. */
	maple_tree_init();

#define TEST(name)							\
	do {								\
		total++;						\
		if (!test_##name()) {					\
			failures++;					\
			fprintf(stderr, "Test " #name " FAILED\n");	\
		}							\
	} while (0)

	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);
#undef TEST

	printf("%d tests run, %d passed, %d failed.\n",
	       total, total - failures, failures);

	return failures == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment