Commit 120b1162 authored by Liam Howlett, committed by Andrew Morton

maple_tree: reorganize testing to restore module testing

During the development cycle, support for building the test code as a module
or into the kernel was removed.  Restore this functionality by moving the
internal API tests, along with the threading tests, to the userspace side.
Fix the lockdep issues and add a way to reduce memory usage so the tests can
complete with KASAN + kmemleak detection enabled.  Make the tests work on 32
bit hosts where possible and detect 32 bit hosts in the radix test suite.

[akpm@linux-foundation.org: fix module export]
[akpm@linux-foundation.org: fix it some more]
[liam.howlett@oracle.com: fix compile warnings on 32bit build in check_find()]
  Link: https://lkml.kernel.org/r/20221107203816.1260327-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20221028180415.3074673-1-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9a887877
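
Most of the lib/maple_tree.c hunks below are EXPORT_SYMBOL_GPL() additions; they are what lets the test code be built as a module again, since a module can only link against exported symbols. As a rough illustration only (a hypothetical module, not the actual lib/test_maple_tree.c, built via the new CONFIG_TEST_MAPLE_TREE option in the Kconfig hunk further down), a test module needs those exports to drive the mas_* API directly:

	#include <linux/module.h>
	#include <linux/maple_tree.h>
	#include <linux/xarray.h>

	static DEFINE_MTREE(demo_tree);

	static int __init demo_init(void)
	{
		MA_STATE(mas, &demo_tree, 5, 10);

		/* Calling mas_store_gfp() from module context only links
		 * because this patch adds EXPORT_SYMBOL_GPL(mas_store_gfp). */
		mas_lock(&mas);
		mas_store_gfp(&mas, xa_mk_value(5), GFP_KERNEL);
		mas_unlock(&mas);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		mtree_destroy(&demo_tree);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");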
@@ -638,6 +638,12 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
 	}
 }
 
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
 		unsigned long max);
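
With mt_height() now a static inline in the public header rather than private to lib/maple_tree.c, test code outside the tree implementation can assert structural expectations. A hypothetical helper (assuming CONFIG_DEBUG_MAPLE_TREE so that MT_BUG_ON is available), just to show the shape of such a check:

	#include <linux/maple_tree.h>

	/* Hypothetical test helper: the height is unpacked straight from
	 * the ma_flags bits, as the inline above shows, so it is cheap. */
	static inline void check_height_le(struct maple_tree *mt, unsigned int max)
	{
		MT_BUG_ON(mt, mt_height(mt) > max);
	}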
@@ -664,6 +670,7 @@ extern atomic_t maple_tree_tests_passed;
 void mt_dump(const struct maple_tree *mt);
 void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
 
 #define MT_BUG_ON(__tree, __x) do {					\
 	atomic_inc(&maple_tree_tests_run);				\
 	if (__x) {							\
@@ -2241,6 +2241,10 @@ config TEST_UUID
 config TEST_XARRAY
 	tristate "Test the XArray code at runtime"
 
+config TEST_MAPLE_TREE
+	select DEBUG_MAPLE_TREE
+	tristate "Test the Maple Tree code at runtime"
+
 config TEST_RHASHTABLE
 	tristate "Perform selftest on resizable hash table"
 	help
@@ -85,6 +85,7 @@ obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
+obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
@@ -183,10 +183,6 @@ static void ma_free_rcu(struct maple_node *node)
 	call_rcu(&node->rcu, mt_free_rcu);
 }
 
-static unsigned int mt_height(const struct maple_tree *mt)
-{
-	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
-}
-
 static void mas_set_height(struct ma_state *mas)
 {
@@ -5061,6 +5057,7 @@ void *mas_walk(struct ma_state *mas)
 	return entry;
 }
+EXPORT_SYMBOL_GPL(mas_walk);
 
 static inline bool mas_rewind_node(struct ma_state *mas)
 {
@@ -5272,6 +5269,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
 	mas->last = mas->index + size - 1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mas_empty_area);
 
 /*
  * mas_empty_area_rev() - Get the highest address within the range that is
@@ -5335,6 +5333,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 	mas->index = mas->last - size + 1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mas_empty_area_rev);
 
 static inline int mas_alloc(struct ma_state *mas, void *entry,
 		unsigned long size, unsigned long *index)
@@ -5656,6 +5655,7 @@ void *mas_store(struct ma_state *mas, void *entry)
 	mas_wr_store_entry(&wr_mas);
 	return wr_mas.content;
 }
+EXPORT_SYMBOL_GPL(mas_store);
 
 /**
  * mas_store_gfp() - Store a value into the tree.
@@ -5682,6 +5682,7 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mas_store_gfp);
 
 /**
  * mas_store_prealloc() - Store a value into the tree using memory
@@ -5699,6 +5700,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
 	BUG_ON(mas_is_err(mas));
 	mas_destroy(mas);
 }
+EXPORT_SYMBOL_GPL(mas_store_prealloc);
 
 /**
  * mas_preallocate() - Preallocate enough nodes for a store operation
@@ -5768,6 +5770,7 @@ void mas_destroy(struct ma_state *mas)
 	}
 	mas->alloc = NULL;
 }
+EXPORT_SYMBOL_GPL(mas_destroy);
 
 /*
  * mas_expected_entries() - Set the expected number of entries that will be inserted.
@@ -5829,6 +5832,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mas_expected_entries);
 
 /**
  * mas_next() - Get the next entry.
@@ -6009,6 +6013,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
 	/* Retries on dead nodes handled by mas_next_entry */
 	return mas_next_entry(mas, max);
 }
+EXPORT_SYMBOL_GPL(mas_find);
 
 /**
  * mas_find_rev: On the first call, find the first non-null entry at or below
@@ -6055,7 +6060,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
 	/* Retries on dead nodes handled by mas_next_entry */
 	return mas_prev_entry(mas, min);
 }
-EXPORT_SYMBOL_GPL(mas_find);
+EXPORT_SYMBOL_GPL(mas_find_rev);
 
 /**
  * mas_erase() - Find the range in which index resides and erase the entire
@@ -6537,8 +6542,27 @@ static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
 	mas_rewalk(mas, index);
 	return 1;
 }
-#endif /* not defined __KERNEL__ */
+
+void mt_cache_shrink(void)
+{
+}
+#else
+/*
+ * mt_cache_shrink() - For testing, don't use this.
+ *
+ * Certain testcases can trigger an OOM when combined with other memory
+ * debugging configuration options.  This function is used to reduce the
+ * possibility of an out of memory event due to kmem_cache objects remaining
+ * around for longer than usual.
+ */
+void mt_cache_shrink(void)
+{
+	kmem_cache_shrink(maple_node_cache);
+}
+EXPORT_SYMBOL_GPL(mt_cache_shrink);
+#endif /* not defined __KERNEL__ */
 
 /*
  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
  * @mas: The maple state
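
The kernel-side variant above is the "way to reduce memory usage" the commit message mentions: shrinking maple_node_cache between test passes keeps a KASAN + kmemleak run from hoarding dead nodes until it hits OOM, while in the userspace build the call compiles down to the empty stub. A hypothetical stress loop (the workload itself elided) might use it like this:

	#include <linux/maple_tree.h>

	/* Hypothetical test pattern: return cached maple nodes to the
	 * allocator after each pass so memory-debugging configs do not
	 * drive the host out of memory. */
	static void run_stress_passes(struct maple_tree *mt, int passes)
	{
		while (passes--) {
			/* ... heavy store/erase workload on mt ... */
			mt_cache_shrink();
		}
	}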
@@ -6812,6 +6836,7 @@ void mt_dump(const struct maple_tree *mt)
 	else if (entry)
 		mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
 }
+EXPORT_SYMBOL_GPL(mt_dump);
 
 /*
  * Calculate the maximum gap in a node and check if that's what is reported in
@@ -7122,5 +7147,6 @@ void mt_validate(struct maple_tree *mt)
 	rcu_read_unlock();
 }
+EXPORT_SYMBOL_GPL(mt_validate);
 
 #endif /* CONFIG_DEBUG_MAPLE_TREE */
 # SPDX-License-Identifier: GPL-2.0-only
+generated/bit-length.h
 generated/map-shift.h
 idr.c
 idr-test
@@ -18,9 +18,14 @@ endif
 ifeq ($(BUILD), 32)
 	CFLAGS += -m32
 	LDFLAGS += -m32
+	LONG_BIT := 32
 endif
 
-targets: generated/map-shift.h $(TARGETS)
+ifndef LONG_BIT
+LONG_BIT := $(shell getconf LONG_BIT)
+endif
+
+targets: generated/map-shift.h generated/bit-length.h $(TARGETS)
 
 main: $(OFILES)
@@ -34,11 +39,11 @@ maple: $(CORE_OFILES)
 multiorder: multiorder.o $(CORE_OFILES)
 
 clean:
-	$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
+	$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h generated/bit-length.h
 
 vpath %.c ../../lib
 
-$(OFILES): Makefile *.h */*.h generated/map-shift.h \
+$(OFILES): Makefile *.h */*.h generated/map-shift.h generated/bit-length.h \
 	../../include/linux/*.h \
 	../../include/asm/*.h \
 	../../../include/linux/xarray.h \
@@ -61,3 +66,11 @@ generated/map-shift.h:
 		echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
 			generated/map-shift.h; \
 	fi
+
+generated/bit-length.h: FORCE
+	@if ! grep -qws CONFIG_$(LONG_BIT)BIT generated/bit-length.h; then \
+		echo "Generating $@"; \
+		echo "#define CONFIG_$(LONG_BIT)BIT 1" > $@; \
+	fi
+
+FORCE: ;
#include "bit-length.h"
#define CONFIG_XARRAY_MULTI 1 #define CONFIG_XARRAY_MULTI 1
#define CONFIG_64BIT 1
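
The grep guard in the Makefile rule above rewrites generated/bit-length.h only when the CONFIG_$(LONG_BIT)BIT define would actually change, so incremental builds are not invalidated on every run. With the header generated from the host's LONG_BIT (or forced to 32 via BUILD=32), userspace test code can branch on word size instead of relying on the previously hard-coded CONFIG_64BIT. A hypothetical use:

	#include "bit-length.h"

	/* Hypothetical helper: pick a test index near the top of the usable
	 * range, which now follows the host word size via the generated
	 * header rather than a hard-coded CONFIG_64BIT. */
	static inline unsigned long max_test_index(void)
	{
	#ifdef CONFIG_64BIT
		return 1UL << 40;
	#else
		return 1UL << 30;
	#endif
	}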
@@ -129,6 +129,10 @@ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
 	pthread_mutex_unlock(&cachep->lock);
 }
 
+void kmem_cache_shrink(struct kmem_cache *cachep)
+{
+}
+
 int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
 			  void **p)
 {