Commit 196894a6 authored by Andrey Konovalov, committed by Andrew Morton

kasan: move is_kmalloc check out of save_alloc_info

Move kasan_info.is_kmalloc check out of save_alloc_info().

This is a preparatory change that simplifies the following patches in this
series.

Link: https://lkml.kernel.org/r/df89f1915b788f9a10319905af6d0202a3b30c30.1662411799.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c249f9af
mm/kasan/common.c
@@ -424,15 +424,10 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	}
 }
 
-static void save_alloc_info(struct kmem_cache *cache, void *object,
-				gfp_t flags, bool is_kmalloc)
+static void save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	struct kasan_alloc_meta *alloc_meta;
 
-	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
-	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
-		return;
-
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (alloc_meta)
 		kasan_set_track(&alloc_meta->alloc_track, flags);
@@ -467,8 +462,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	kasan_unpoison(tagged_object, cache->object_size, init);
 
 	/* Save alloc info (if possible) for non-kmalloc() allocations. */
-	if (kasan_stack_collection_enabled())
-		save_alloc_info(cache, (void *)object, flags, false);
+	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+		save_alloc_info(cache, (void *)object, flags);
 
 	return tagged_object;
 }
@@ -513,8 +508,8 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
 	 * Save alloc info (if possible) for kmalloc() allocations.
 	 * This also rewrites the alloc info when called from kasan_krealloc().
 	 */
-	if (kasan_stack_collection_enabled())
-		save_alloc_info(cache, (void *)object, flags, true);
+	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+		save_alloc_info(cache, (void *)object, flags);
 
 	/* Keep the tag that was set by kasan_slab_alloc(). */
 	return (void *)object;
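For readers following the series, here is a minimal stand-alone sketch (user-space C, not kernel code) of the restructuring this patch performs: the is_kmalloc filter moves out of the callee and into its two call sites, so the save function itself becomes unconditional. The names cache_info, record_alloc_track, slab_alloc_path and kmalloc_path are hypothetical simplifications of the kernel counterparts.

/*
 * Sketch only: the "is this a kmalloc cache?" check now lives at the
 * call sites rather than inside the save helper, mirroring the diff above.
 */
#include <stdbool.h>
#include <stdio.h>

struct cache_info {
	const char *name;
	bool is_kmalloc;	/* stands in for kasan_info.is_kmalloc */
};

/* After the change: the helper records unconditionally. */
static void record_alloc_track(const struct cache_info *cache)
{
	printf("alloc stack saved for cache %s\n", cache->name);
}

/* Call site 1 (slab_alloc path): only non-kmalloc caches record here. */
static void slab_alloc_path(const struct cache_info *cache, bool stacks_enabled)
{
	if (stacks_enabled && !cache->is_kmalloc)
		record_alloc_track(cache);
}

/* Call site 2 (kmalloc path): only kmalloc caches record here. */
static void kmalloc_path(const struct cache_info *cache, bool stacks_enabled)
{
	if (stacks_enabled && cache->is_kmalloc)
		record_alloc_track(cache);
}

int main(void)
{
	struct cache_info dentry = { "dentry", false };
	struct cache_info kmalloc_64 = { "kmalloc-64", true };

	/* Each object still gets its alloc info saved exactly once. */
	slab_alloc_path(&dentry, true);      /* records */
	slab_alloc_path(&kmalloc_64, true);  /* skipped: kmalloc cache */
	kmalloc_path(&kmalloc_64, true);     /* records */
	return 0;
}

The behavior is unchanged; only the placement of the check differs, which is what makes the later patches in the series simpler.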