Commit 022012dc authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot, kasan: add flags to __stack_depot_save and rename

Change the bool can_alloc argument of __stack_depot_save to a u32 argument
that accepts a set of flags.

The following patch will add another flag to stack_depot_save_flags besides
the existing STACK_DEPOT_FLAG_CAN_ALLOC.

Also rename the function to stack_depot_save_flags, as __stack_depot_save is
a cryptic name.

Link: https://lkml.kernel.org/r/645fa15239621eebbd3a10331e5864b718839512.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3bddc310
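To illustrate the interface change for callers, here is a minimal sketch (not part of the patch; record_stack(), the 16-entry array, and the GFP_NOWAIT choice are made up for this example, while the stack depot identifiers come from the patch itself):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_stack(void)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Old form: __stack_depot_save(entries, nr_entries, GFP_NOWAIT, true); */
	return stack_depot_save_flags(entries, nr_entries, GFP_NOWAIT,
				      STACK_DEPOT_FLAG_CAN_ALLOC);
}

The existing in-tree callers are converted in the same mechanical way in the hunks below.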
@@ -32,6 +32,17 @@ typedef u32 depot_stack_handle_t;
  */
 #define STACK_DEPOT_EXTRA_BITS 5
 
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC	((depot_flags_t)0x0001)
+
+#define STACK_DEPOT_FLAGS_NUM	1
+#define STACK_DEPOT_FLAGS_MASK	((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
 /*
  * Using stack depot requires its initialization, which can be done in 3 ways:
  *
@@ -69,31 +80,34 @@ static inline int stack_depot_early_init(void) { return 0; }
 #endif
 
 /**
- * __stack_depot_save - Save a stack trace to stack depot
+ * stack_depot_save_flags - Save a stack trace to stack depot
  *
  * @entries:		Pointer to the stack trace
  * @nr_entries:		Number of frames in the stack
  * @alloc_flags:	Allocation GFP flags
- * @can_alloc:		Allocate stack pools (increased chance of failure if false)
+ * @depot_flags:	Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
  *
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, stack depot can replenish the stack pools in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and fails if no space is left to store the stack trace.
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
 *
 * If the provided stack trace comes from the interrupt context, only the part
 * up to the interrupt entry is saved.
 *
- * Context: Any context, but setting @can_alloc to %false is required if
+ * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case for contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: Handle of the stack struct stored in depot, 0 on failure
 */
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
-					unsigned int nr_entries,
-					gfp_t gfp_flags, bool can_alloc);
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+					    unsigned int nr_entries,
+					    gfp_t gfp_flags,
+					    depot_flags_t depot_flags);
 
 /**
  * stack_depot_save - Save a stack trace to stack depot
@@ -103,7 +117,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 * @alloc_flags:	Allocation GFP flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
- *          See __stack_depot_save() for more details.
+ *          See stack_depot_save_flags() for more details.
 *
 * Return: Handle of the stack trace stored in depot, 0 on failure
 */
...
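Per the Context note in the kernel-doc above, a caller that cannot use alloc_pages() simply leaves STACK_DEPOT_FLAG_CAN_ALLOC unset. A minimal sketch, assuming a hypothetical record_stack_noalloc() helper that is not part of the patch:

#include <linux/stackdepot.h>

/* Hypothetical helper: save a trace without ever allocating new pools. */
static depot_stack_handle_t record_stack_noalloc(unsigned long *entries,
						 unsigned int nr_entries)
{
	/*
	 * No STACK_DEPOT_FLAG_CAN_ALLOC: stack depot will not call
	 * alloc_pages(), so this is usable in NMI or raw_spin_lock context,
	 * but it returns 0 once the existing stack pools are full.
	 */
	return stack_depot_save_flags(entries, nr_entries, 0, 0);
}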
@@ -450,19 +450,24 @@ static inline struct stack_record *find_stack(struct list_head *bucket,
 	return NULL;
 }
 
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
-					unsigned int nr_entries,
-					gfp_t alloc_flags, bool can_alloc)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+					    unsigned int nr_entries,
+					    gfp_t alloc_flags,
+					    depot_flags_t depot_flags)
 {
 	struct list_head *bucket;
 	struct stack_record *found = NULL;
 	depot_stack_handle_t handle = 0;
 	struct page *page = NULL;
 	void *prealloc = NULL;
+	bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
 	bool need_alloc = false;
 	unsigned long flags;
 	u32 hash;
 
+	if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+		return 0;
+
 	/*
 	 * If this stack trace is from an interrupt, including anything before
 	 * interrupt entry usually leads to unbounded stack depot growth.
@@ -541,13 +546,14 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	handle = found->handle.handle;
 	return handle;
 }
-EXPORT_SYMBOL_GPL(__stack_depot_save);
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
 
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
 				      unsigned int nr_entries,
 				      gfp_t alloc_flags)
 {
-	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+	return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+				      STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 EXPORT_SYMBOL_GPL(stack_depot_save);
...
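The WARN_ON added above accepts only bits inside STACK_DEPOT_FLAGS_MASK, which evaluates to (1 << STACK_DEPOT_FLAGS_NUM) - 1 = 0x1 while STACK_DEPOT_FLAG_CAN_ALLOC is the only defined flag. A sketch of both cases, assuming a hypothetical depot_flags_example() helper and a deliberately invalid 0x2 flag value (neither is in the patch):

#include <linux/gfp.h>
#include <linux/stackdepot.h>

/* Hypothetical illustration of the flag validation. */
static void depot_flags_example(unsigned long *entries, unsigned int nr_entries)
{
	depot_stack_handle_t handle;

	/* Valid: bit 0 is STACK_DEPOT_FLAG_CAN_ALLOC. */
	handle = stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
					STACK_DEPOT_FLAG_CAN_ALLOC);

	/*
	 * Invalid: bit 1 lies outside STACK_DEPOT_FLAGS_MASK (0x1), so the
	 * WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK) check fires and the
	 * call returns 0 without saving anything.
	 */
	handle = stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
					(depot_flags_t)0x2);
	(void)handle;
}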
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/slab.h>
+#include <linux/stackdepot.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -37,19 +38,19 @@ struct slab *kasan_addr_to_slab(const void *addr)
 	return NULL;
 }
 
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
 {
 	unsigned long entries[KASAN_STACK_DEPTH];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
-	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
+	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
 }
 
 void kasan_set_track(struct kasan_track *track, gfp_t flags)
 {
 	track->pid = current->pid;
-	track->stack = kasan_save_stack(flags, true);
+	track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
...
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/slab.h>
+#include <linux/stackdepot.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -472,7 +473,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 		sizeof(struct kasan_free_meta) : 0);
 }
 
-static void __kasan_record_aux_stack(void *addr, bool can_alloc)
+static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 {
 	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
@@ -489,17 +490,17 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 		return;
 
 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-	alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc);
+	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
 }
 
 void kasan_record_aux_stack(void *addr)
 {
-	return __kasan_record_aux_stack(addr, true);
+	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 
 void kasan_record_aux_stack_noalloc(void *addr)
 {
-	return __kasan_record_aux_stack(addr, false);
+	return __kasan_record_aux_stack(addr, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
...
@@ -368,7 +368,7 @@ static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
 void kasan_save_free_info(struct kmem_cache *cache, void *object);
...
@@ -13,6 +13,7 @@
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
+#include <linux/stackdepot.h>
 #include <linux/static_key.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -101,7 +102,7 @@ static void save_stack_info(struct kmem_cache *cache, void *object,
 	struct kasan_stack_ring_entry *entry;
 	void *old_ptr;
 
-	stack = kasan_save_stack(gfp_flags, true);
+	stack = kasan_save_stack(gfp_flags, STACK_DEPOT_FLAG_CAN_ALLOC);
 
 	/*
 	 * Prevent save_stack_info() from modifying stack ring
...