Commit 4805180b authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: use list_head for stack record links

Switch stack_record to use list_head for links in the hash table and in the freelist.

This will allow removing entries from the hash table buckets.

This is a preparatory patch for implementing the eviction of stack records from the stack depot.

Link: https://lkml.kernel.org/r/4787d9a584cd33433d9ee1846b17fa3d3e1987ad.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a6cd9570
lib/stackdepot.c
@@ -18,6 +18,7 @@
 #include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/kmsan.h>
+#include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
@@ -55,7 +56,7 @@ union handle_parts {
 };
 
 struct stack_record {
-	struct stack_record *next;	/* Link in hash table or freelist */
+	struct list_head list;		/* Links in hash table or freelist */
 	u32 hash;			/* Hash in hash table */
 	u32 size;			/* Number of stored frames */
 	union handle_parts handle;
@@ -77,21 +78,21 @@ static bool __stack_depot_early_init_passed __initdata;
 /* Initial seed for jhash2. */
 #define STACK_HASH_SEED 0x9747b28c
 
-/* Hash table of pointers to stored stack traces. */
-static struct stack_record **stack_table;
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
 /* Fixed order of the number of table buckets. Used when KASAN is enabled. */
 static unsigned int stack_bucket_number_order;
 /* Hash mask for indexing the table. */
 static unsigned int stack_hash_mask;
 
-/* Array of memory regions that store stack traces. */
+/* Array of memory regions that store stack records. */
 static void *stack_pools[DEPOT_MAX_POOLS];
 /* Newly allocated pool that is not yet added to stack_pools. */
 static void *new_pool;
 /* Number of pools in stack_pools. */
 static int pools_num;
-/* Next stack in the freelist of stack records within stack_pools. */
-static struct stack_record *next_stack;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
 /*
  * Stack depot tries to keep an extra pool allocated even before it runs out
  * of space in the currently used pool. This flag marks whether this extra pool
@@ -116,6 +117,15 @@ void __init stack_depot_request_early_init(void)
 	__stack_depot_early_init_requested = true;
 }
 
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
+{
+	unsigned long i;
+
+	for (i = 0; i < entries; i++)
+		INIT_LIST_HEAD(&stack_table[i]);
+}
+
 /* Allocates a hash table via memblock. Can only be used during early boot. */
 int __init stack_depot_early_init(void)
 {
@@ -152,16 +162,16 @@ int __init stack_depot_early_init(void)
 	/*
 	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
-	 * on the automatic calculations performed by alloc_large_system_hash.
+	 * on the automatic calculations performed by alloc_large_system_hash().
 	 */
 	if (stack_bucket_number_order)
 		entries = 1UL << stack_bucket_number_order;
 
 	pr_info("allocating hash table via alloc_large_system_hash\n");
 	stack_table = alloc_large_system_hash("stackdepot",
-						sizeof(struct stack_record *),
+						sizeof(struct list_head),
						entries,
						STACK_HASH_TABLE_SCALE,
-						HASH_EARLY | HASH_ZERO,
+						HASH_EARLY,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
@@ -171,6 +181,14 @@ int __init stack_depot_early_init(void)
 		stack_depot_disabled = true;
 		return -ENOMEM;
 	}
 
+	if (!entries) {
+		/*
+		 * Obtain the number of entries that was calculated by
+		 * alloc_large_system_hash().
+		 */
+		entries = stack_hash_mask + 1;
+	}
+	init_stack_table(entries);
 	return 0;
 }
@@ -211,7 +229,7 @@ int stack_depot_init(void)
 		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
 
 	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
-	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+	stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
 	if (!stack_table) {
 		pr_err("hash table allocation failed, disabling\n");
 		stack_depot_disabled = true;
@@ -219,6 +237,7 @@ int stack_depot_init(void)
 		goto out_unlock;
 	}
 	stack_hash_mask = entries - 1;
+	init_stack_table(entries);
 
 out_unlock:
 	mutex_unlock(&stack_depot_init_mutex);
@@ -230,31 +249,24 @@ EXPORT_SYMBOL_GPL(stack_depot_init);
 
 /* Initializes a stack depol pool. */
 static void depot_init_pool(void *pool)
 {
-	const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
-	int i, offset;
+	int offset;
 
 	lockdep_assert_held_write(&pool_rwlock);
 
-	/* Initialize handles and link stack records to each other. */
-	for (i = 0, offset = 0;
-	     offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
-	     i++, offset += DEPOT_STACK_RECORD_SIZE) {
+	WARN_ON(!list_empty(&free_stacks));
+
+	/* Initialize handles and link stack records into the freelist. */
+	for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+	     offset += DEPOT_STACK_RECORD_SIZE) {
 		struct stack_record *stack = pool + offset;
 
 		stack->handle.pool_index = pools_num;
 		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
 		stack->handle.extra = 0;
-		if (i < records_in_pool - 1)
-			stack->next = (void *)stack + DEPOT_STACK_RECORD_SIZE;
-		else
-			stack->next = NULL;
+		list_add(&stack->list, &free_stacks);
 	}
 
-	/* Link stack records into the freelist. */
-	WARN_ON(next_stack);
-	next_stack = pool;
-
 	/* Save reference to the pool to be used by depot_fetch_stack(). */
 	stack_pools[pools_num] = pool;
 	pools_num++;
@@ -295,7 +307,7 @@ static bool depot_update_pools(void **prealloc)
 	lockdep_assert_held_write(&pool_rwlock);
 
 	/* Check if we still have objects in the freelist. */
-	if (next_stack)
+	if (!list_empty(&free_stacks))
 		goto out_keep_prealloc;
 
 	/* Check if we have a new pool saved and use it. */
@@ -346,19 +358,18 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 		return NULL;
 
 	/* Check if we have a stack record to save the stack trace. */
-	stack = next_stack;
-	if (!stack)
+	if (list_empty(&free_stacks))
 		return NULL;
 
-	/* Advance the freelist. */
-	next_stack = stack->next;
+	/* Get and unlink the first entry from the freelist. */
+	stack = list_first_entry(&free_stacks, struct stack_record, list);
+	list_del(&stack->list);
 
 	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
 	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
 		size = CONFIG_STACKDEPOT_MAX_FRAMES;
 
 	/* Save the stack trace. */
-	stack->next = NULL;
 	stack->hash = hash;
 	stack->size = size;
 	/* stack->handle is already filled in by depot_init_pool(). */
@@ -420,15 +431,17 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
 }
 
 /* Finds a stack in a bucket of the hash table. */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
+static inline struct stack_record *find_stack(struct list_head *bucket,
 					      unsigned long *entries, int size,
 					      u32 hash)
 {
+	struct list_head *pos;
 	struct stack_record *found;
 
 	lockdep_assert_held(&pool_rwlock);
 
-	for (found = bucket; found; found = found->next) {
+	list_for_each(pos, bucket) {
+		found = list_entry(pos, struct stack_record, list);
 		if (found->hash == hash &&
 		    found->size == size &&
 		    !stackdepot_memcmp(entries, found->entries, size))
@@ -441,7 +454,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					unsigned int nr_entries,
 					gfp_t alloc_flags, bool can_alloc)
 {
-	struct stack_record *found = NULL, **bucket;
+	struct list_head *bucket;
+	struct stack_record *found = NULL;
 	depot_stack_handle_t handle = 0;
 	struct page *page = NULL;
 	void *prealloc = NULL;
@@ -468,7 +482,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	read_lock_irqsave(&pool_rwlock, flags);
 
 	/* Fast path: look the stack trace up without full locking. */
-	found = find_stack(*bucket, entries, nr_entries, hash);
+	found = find_stack(bucket, entries, nr_entries, hash);
 	if (found) {
 		read_unlock_irqrestore(&pool_rwlock, flags);
 		goto exit;
@@ -500,14 +514,13 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	write_lock_irqsave(&pool_rwlock, flags);
 
-	found = find_stack(*bucket, entries, nr_entries, hash);
+	found = find_stack(bucket, entries, nr_entries, hash);
 	if (!found) {
 		struct stack_record *new =
 			depot_alloc_stack(entries, nr_entries, hash, &prealloc);
 
 		if (new) {
-			new->next = *bucket;
-			*bucket = new;
+			list_add(&new->list, bucket);
 			found = new;
 		}
 	} else if (prealloc) {