Commit 2dfe63e6 authored by Marco Elver, committed by Linus Torvalds

mm, kfence: support kmem_dump_obj() for KFENCE objects

Calling kmem_obj_info() via kmem_dump_obj() on KFENCE objects has been
producing garbage data due to the object not actually being maintained
by SLAB or SLUB.

Fix this by implementing __kfence_obj_info() that copies relevant
information to struct kmem_obj_info when the object was allocated by
KFENCE; this is called by a common kmem_obj_info(), which also calls the
slab/slub/slob specific variant now called __kmem_obj_info().

For completeness, kmem_dump_obj() now displays if the object was
allocated by KFENCE.

Link: https://lore.kernel.org/all/20220323090520.GG16885@xsang-OptiPlex-9020/
Link: https://lkml.kernel.org/r/20220406131558.3558585-1-elver@google.com
Fixes: b89fb5ef ("mm, kfence: insert KFENCE hooks for SLUB")
Fixes: d3fb45f3 ("mm, kfence: insert KFENCE hooks for SLAB")
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>	[slab]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b1add418
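For context, kmem_dump_obj() (together with kmem_valid_obj()) is meant to be called from debugging and diagnostics code, e.g. when RCU debugging reports a suspicious callback pointer. Below is a minimal sketch of a caller; it is illustrative only and not part of this commit: the module name dump_demo is made up, and it assumes a kernel with CONFIG_PRINTK and CONFIG_KFENCE. Since KFENCE only samples allocations (see the kfence.sample_interval parameter), a given kmalloc() may or may not be served from the KFENCE pool; when it is, kmem_dump_obj() now prints a "(kfence)" marker instead of garbage SLAB/SLUB metadata.

#include <linux/module.h>
#include <linux/slab.h>

static int __init dump_demo_init(void)
{
        /* An ordinary heap allocation; KFENCE may or may not sample it. */
        void *p = kmalloc(128, GFP_KERNEL);

        if (!p)
                return -ENOMEM;

        /* kmem_valid_obj() is true for both slab-backed and KFENCE objects. */
        if (kmem_valid_obj(p))
                kmem_dump_obj(p);

        kfree(p);
        return 0;
}
module_init(dump_demo_init);

static void __exit dump_demo_exit(void)
{
}
module_exit(dump_demo_exit);

MODULE_LICENSE("GPL");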
@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
  */
 bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
 #else /* CONFIG_KFENCE */
 static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
 	return false;
 }
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	return false;
+}
+#endif
 
 #endif
 
 #endif /* _LINUX_KFENCE_H */
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 }
 
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
-	long index;
-
-	/* The checks do not affect performance; only called from slow-paths. */
-
-	if (!is_kfence_address((void *)addr))
-		return NULL;
-
-	/*
-	 * May be an invalid index if called with an address at the edge of
-	 * __kfence_pool, in which case we would report an "invalid access"
-	 * error.
-	 */
-	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
-		return NULL;
-
-	return &kfence_metadata[index];
-}
-
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
@@ -96,6 +96,27 @@ struct kfence_metadata {
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+	long index;
+
+	/* The checks do not affect performance; only called from slow-paths. */
+
+	if (!is_kfence_address((void *)addr))
+		return NULL;
+
+	/*
+	 * May be an invalid index if called with an address at the edge of
+	 * __kfence_pool, in which case we would report an "invalid access"
+	 * error.
+	 */
+	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+		return NULL;
+
+	return &kfence_metadata[index];
+}
+
 /* KFENCE error types for report generation. */
 enum kfence_error_type {
 	KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
 	/* We encountered a memory safety error, taint the kernel! */
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
 }
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+	int i, j;
+
+	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+		kp_stack[j] = (void *)track->stack_entries[i];
+	if (j < KS_ADDRS_COUNT)
+		kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+	unsigned long flags;
+
+	if (!meta)
+		return false;
+
+	/*
+	 * If state is UNUSED at least show the pointer requested; the rest
+	 * would be garbage data.
+	 */
+	kpp->kp_ptr = object;
+
+	/* Requesting info on a never-used object is almost certainly a bug. */
+	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+		return true;
+
+	raw_spin_lock_irqsave(&meta->lock, flags);
+
+	kpp->kp_slab = slab;
+	kpp->kp_slab_cache = meta->cache;
+	kpp->kp_objp = (void *)meta->addr;
+	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+	if (meta->state == KFENCE_OBJECT_FREED)
+		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+
+	/* get_stack_skipnr() ensures the first entry is outside allocator. */
+	kpp->kp_ret = kpp->kp_stack[0];
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+	return true;
+}
+#endif
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -868,7 +868,7 @@ struct kmem_obj_info {
 	void *kp_stack[KS_ADDRS_COUNT];
 	void *kp_free_stack[KS_ADDRS_COUNT];
 };
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
 
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	if (__kfence_obj_info(kpp, object, slab))
+		return;
+	__kmem_obj_info(kpp, object, slab);
+}
+
 /**
  * kmem_dump_obj - Print available slab provenance information
  * @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
 	else
 		pr_cont(" slab%s", cp);
+	if (is_kfence_address(object))
+		pr_cont(" (kfence)");
 	if (kp.kp_objp)
 		pr_cont(" start %px", kp.kp_objp);
 	if (kp.kp_data_offset)
@@ -463,7 +463,7 @@ static void slob_free(void *block, int size)
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	kpp->kp_ptr = object;
 	kpp->kp_slab = slab;
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	void *base;
 	int __maybe_unused i;