Commit 53128245 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, kmemleak: pass tagged pointers to kmemleak

Right now we call kmemleak hooks before assigning tags to pointers in
KASAN hooks.  As a result, when an object gets allocated, kmemleak sees a
differently tagged pointer, compared to the one it sees when the object
gets freed.  Fix it by calling KASAN hooks before kmemleak's ones.
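
For context: KASAN's software tag-based mode (CONFIG_KASAN_SW_TAGS on
arm64) stores a tag in the top byte of each object pointer, while
kmemleak tracks objects by the exact pointer value it was handed.  Below
is a minimal user-space sketch of the mismatch, not part of this commit;
set_tag() and TAG_SHIFT are illustrative stand-ins for the kernel's
tagging helpers, and a 64-bit platform is assumed:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* arm64 Top Byte Ignore: bits 56-63 of an address hold the tag. */
	#define TAG_SHIFT 56

	static void *set_tag(void *ptr, uint8_t tag)
	{
		return (void *)(((uintptr_t)ptr & ~((uintptr_t)0xff << TAG_SHIFT)) |
				((uintptr_t)tag << TAG_SHIFT));
	}

	int main(void)
	{
		void *object = malloc(32);		/* untagged allocator pointer */
		void *tagged = set_tag(object, 0xab);	/* what kasan_slab_alloc() would return */

		/*
		 * Before the fix, kmemleak recorded the untagged pointer at
		 * allocation time, but the free path later handed it the
		 * tagged pointer, so the two addresses did not match.
		 */
		printf("pointer kmemleak saw at alloc: %p\n", object);
		printf("pointer kmemleak saw at free:  %p\n", tagged);

		free(object);
		return 0;
	}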

Link: http://lkml.kernel.org/r/cd825aa4897b0fc37d3316838993881daccbe9f5.1549921721.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reported-by: Qian Cai <cai@lca.pw>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgeniy Stepanov <eugenis@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e1db95be
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 	flags &= gfp_allowed_mask;
 	for (i = 0; i < size; i++) {
-		void *object = p[i];
-
-		kmemleak_alloc_recursive(object, s->object_size, 1,
+		p[i] = kasan_slab_alloc(s, p[i], flags);
+		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
-		p[i] = kasan_slab_alloc(s, object, flags);
 	}
 
 	if (memcg_kmem_enabled())
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1228,8 +1228,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
-	kmemleak_alloc(ret, size, 1, flags);
 	ret = kasan_kmalloc_large(ret, size, flags);
+	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1374,8 +1374,9 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
  */
 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
-	kmemleak_alloc(ptr, size, 1, flags);
-	return kasan_kmalloc_large(ptr, size, flags);
+	ptr = kasan_kmalloc_large(ptr, size, flags);
+	kmemleak_alloc(ptr, size, 1, flags);
+	return ptr;
 }
 
 static __always_inline void kfree_hook(void *x)