Commit b29d3188 authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: store free stack records in a freelist

Instead of using the global pool_offset variable to find a free slot
when storing a new stack record, maintain a freelist of free slots
within the allocated stack pools.

A global next_stack variable is used as the head of the freelist, and
the next field in the stack_record struct is reused as the freelist
link (when the record is not in the freelist, this field is used as a
link in the hash table).
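
The scheme is essentially a singly linked stack of records threaded
through the pools. A minimal sketch in plain C (hypothetical helper
names sketch_init_pool() and sketch_alloc(); the real struct
stack_record also carries the hash, size, handle, and stored frames,
and the real code chains records by byte offsets within a pool):

  #include <stddef.h>

  struct stack_record {
          struct stack_record *next; /* link in hash table or freelist */
  };

  static struct stack_record *next_stack; /* head of the freelist */

  /* Pool initialization: chain all records of a fresh pool together. */
  static void sketch_init_pool(struct stack_record *records, int count)
  {
          int i;

          for (i = 0; i < count - 1; i++)
                  records[i].next = &records[i + 1];
          records[count - 1].next = NULL;
          next_stack = records; /* the freelist is empty at this point */
  }

  /* Allocation: pop the head; the record then links into the hash table. */
  static struct stack_record *sketch_alloc(void)
  {
          struct stack_record *stack = next_stack;

          if (!stack)
                  return NULL; /* freelist exhausted: a new pool is needed */
          next_stack = stack->next;
          stack->next = NULL;
          return stack;
  }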

This is a preparatory patch for implementing the eviction of stack
records from the stack depot.

Link: https://lkml.kernel.org/r/b9e4c79955c2121b69301778643b203d3fb09ccc.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a5d21f71
@@ -54,8 +54,8 @@ union handle_parts {
 };
 
 struct stack_record {
-	struct stack_record *next;	/* Link in the hash table */
-	u32 hash;			/* Hash in the hash table */
+	struct stack_record *next;	/* Link in hash table or freelist */
+	u32 hash;			/* Hash in hash table */
 	u32 size;			/* Number of stored frames */
 	union handle_parts handle;
 	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
@@ -87,10 +87,10 @@ static unsigned int stack_hash_mask;
 static void *stack_pools[DEPOT_MAX_POOLS];
 /* Newly allocated pool that is not yet added to stack_pools. */
 static void *new_pool;
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Next stack in the freelist of stack records within stack_pools. */
+static struct stack_record *next_stack;
 /* Lock that protects the variables above. */
 static DEFINE_RAW_SPINLOCK(pool_lock);
 /*
@@ -226,6 +226,42 @@ int stack_depot_init(void)
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
+/* Initializes a stack depot pool. */
+static void depot_init_pool(void *pool)
+{
+	const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
+	int i, offset;
+
+	/* Initialize handles and link stack records to each other. */
+	for (i = 0, offset = 0;
+	     offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+	     i++, offset += DEPOT_STACK_RECORD_SIZE) {
+		struct stack_record *stack = pool + offset;
+
+		stack->handle.pool_index = pools_num;
+		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+		stack->handle.extra = 0;
+
+		if (i < records_in_pool - 1)
+			stack->next = (void *)stack + DEPOT_STACK_RECORD_SIZE;
+		else
+			stack->next = NULL;
+	}
+
+	/* Link stack records into the freelist. */
+	WARN_ON(next_stack);
+	next_stack = pool;
+
+	/* Save reference to the pool to be used by depot_fetch_stack(). */
+	stack_pools[pools_num] = pool;
+
+	/*
+	 * WRITE_ONCE() pairs with potential concurrent read in
+	 * depot_fetch_stack().
+	 */
+	WRITE_ONCE(pools_num, pools_num + 1);
+}
+
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
 {
@@ -242,7 +278,7 @@ static void depot_keep_new_pool(void **prealloc)
 	 * Use the preallocated memory for the new pool
 	 * as long as we do not exceed the maximum number of pools.
 	 */
-	if (pool_index + 1 < DEPOT_MAX_POOLS) {
+	if (pools_num < DEPOT_MAX_POOLS) {
 		new_pool = *prealloc;
 		*prealloc = NULL;
 	}
@@ -258,45 +294,42 @@ static void depot_keep_new_pool(void **prealloc)
 }
 
 /* Updates references to the current and the next stack depot pools. */
-static bool depot_update_pools(size_t required_size, void **prealloc)
+static bool depot_update_pools(void **prealloc)
 {
-	/* Check if there is not enough space in the current pool. */
-	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
-		/* Bail out if we reached the pool limit. */
-		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
-			WARN_ONCE(1, "Stack depot reached limit capacity");
-			return false;
-		}
-
-		/*
-		 * Move on to the new pool.
-		 * WRITE_ONCE() pairs with potential concurrent read in
-		 * stack_depot_fetch().
-		 */
-		WRITE_ONCE(pool_index, pool_index + 1);
-		stack_pools[pool_index] = new_pool;
+	/* Check if we still have objects in the freelist. */
+	if (next_stack)
+		goto out_keep_prealloc;
+
+	/* Check if we have a new pool saved and use it. */
+	if (new_pool) {
+		depot_init_pool(new_pool);
 		new_pool = NULL;
-		pool_offset = 0;
 
-		/*
-		 * If the maximum number of pools is not reached, take note
-		 * that yet another new pool needs to be allocated.
-		 * smp_store_release() pairs with smp_load_acquire() in
-		 * stack_depot_save().
-		 */
-		if (pool_index + 1 < DEPOT_MAX_POOLS)
+		/* Take note that we might need a new new_pool. */
+		if (pools_num < DEPOT_MAX_POOLS)
 			smp_store_release(&new_pool_required, 1);
+
+		/* Try keeping the preallocated memory for new_pool. */
+		goto out_keep_prealloc;
+	}
+
+	/* Bail out if we reached the pool limit. */
+	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+		WARN_ONCE(1, "Stack depot reached limit capacity");
+		return false;
 	}
 
-	/* Check if the current pool is not yet allocated. */
-	if (*prealloc && stack_pools[pool_index] == NULL) {
-		/* Use the preallocated memory for the current pool. */
-		stack_pools[pool_index] = *prealloc;
+	/* Check if we have preallocated memory and use it. */
+	if (*prealloc) {
+		depot_init_pool(*prealloc);
 		*prealloc = NULL;
 		return true;
 	}
 
-	/* Otherwise, try using the preallocated memory for a new pool. */
+	return false;
+
+out_keep_prealloc:
+	/* Keep the preallocated memory for a new pool if required. */
 	if (*prealloc)
 		depot_keep_new_pool(prealloc);
 
 	return true;
@@ -307,35 +340,35 @@ static struct stack_record *
 depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
 	struct stack_record *stack;
-	size_t required_size = DEPOT_STACK_RECORD_SIZE;
 
 	/* Update current and new pools if required and possible. */
-	if (!depot_update_pools(required_size, prealloc))
+	if (!depot_update_pools(prealloc))
 		return NULL;
 
-	/* Check if we have a pool to save the stack trace. */
-	if (stack_pools[pool_index] == NULL)
+	/* Check if we have a stack record to save the stack trace. */
+	stack = next_stack;
+	if (!stack)
 		return NULL;
 
+	/* Advance the freelist. */
+	next_stack = stack->next;
+
 	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
 	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
 		size = CONFIG_STACKDEPOT_MAX_FRAMES;
 
 	/* Save the stack trace. */
-	stack = stack_pools[pool_index] + pool_offset;
+	stack->next = NULL;
 	stack->hash = hash;
 	stack->size = size;
-	stack->handle.pool_index = pool_index;
-	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
-	stack->handle.extra = 0;
+	/* stack->handle is already filled in by depot_init_pool(). */
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
-	pool_offset += required_size;
 
 	/*
 	 * Let KMSAN know the stored stack record is initialized. This shall
 	 * prevent false positive reports if instrumented code accesses it.
 	 */
-	kmsan_unpoison_memory(stack, required_size);
+	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
 	return stack;
 }
@@ -345,16 +378,16 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 	union handle_parts parts = { .handle = handle };
 	/*
 	 * READ_ONCE() pairs with potential concurrent write in
-	 * depot_update_pools().
+	 * depot_init_pool().
 	 */
-	int pool_index_cached = READ_ONCE(pool_index);
+	int pools_num_cached = READ_ONCE(pools_num);
 	void *pool;
 	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;
 
-	if (parts.pool_index > pool_index_cached) {
+	if (parts.pool_index > pools_num_cached) {
 		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-		     parts.pool_index, pool_index_cached, handle);
+		     parts.pool_index, pools_num_cached, handle);
 		return NULL;
 	}
...
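
For reference, the handle-to-record mapping that depot_fetch_stack()
relies on can be sketched as follows (a simplified sketch with a
hypothetical sketch_fetch() helper; the real function additionally
validates parts.pool_index against the cached pools_num, as shown in
the hunk above):

  /* Resolve a stack depot handle into a record pointer. */
  static struct stack_record *sketch_fetch(union handle_parts parts)
  {
          void *pool = stack_pools[parts.pool_index];
          size_t offset = parts.offset << DEPOT_STACK_ALIGN;

          if (!pool)
                  return NULL;
          return (struct stack_record *)(pool + offset);
  }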