Commit 66afc7f1, authored by Andrey Konovalov, committed by Linus Torvalds

kasan: add __must_check annotations to kasan hooks

This patch adds __must_check annotations to kasan hooks that return a
pointer to make sure that a tagged pointer always gets propagated.

Link: http://lkml.kernel.org/r/03b269c5e453945f724bfca3159d4e1333a8fb1c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Suggested-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2813b9c0
...@@ -49,16 +49,20 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, ...@@ -49,16 +49,20 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void kasan_poison_slab(struct page *page); void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object); void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object); void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
const void *object);
void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip); void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip); void kasan_poison_kfree(void *ptr, unsigned long ip);
void *kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
gfp_t flags); size_t size, gfp_t flags);
void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void * __must_check kasan_krealloc(const void *object, size_t new_size,
gfp_t flags);
void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
struct kasan_cache { struct kasan_cache {
......
...@@ -373,7 +373,8 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) ...@@ -373,7 +373,8 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
#endif #endif
} }
void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object) void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{ {
struct kasan_alloc_meta *alloc_info; struct kasan_alloc_meta *alloc_info;
...@@ -389,7 +390,8 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object) ...@@ -389,7 +390,8 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
return (void *)object; return (void *)object;
} }
void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
gfp_t flags)
{ {
return kasan_kmalloc(cache, object, cache->object_size, flags); return kasan_kmalloc(cache, object, cache->object_size, flags);
} }
...@@ -449,8 +451,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) ...@@ -449,8 +451,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
return __kasan_slab_free(cache, object, ip, true); return __kasan_slab_free(cache, object, ip, true);
} }
void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
gfp_t flags) size_t size, gfp_t flags)
{ {
unsigned long redzone_start; unsigned long redzone_start;
unsigned long redzone_end; unsigned long redzone_end;
...@@ -482,7 +484,8 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, ...@@ -482,7 +484,8 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
} }
EXPORT_SYMBOL(kasan_kmalloc); EXPORT_SYMBOL(kasan_kmalloc);
void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
gfp_t flags)
{ {
struct page *page; struct page *page;
unsigned long redzone_start; unsigned long redzone_start;
...@@ -506,7 +509,7 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) ...@@ -506,7 +509,7 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
return (void *)ptr; return (void *)ptr;
} }
void *kasan_krealloc(const void *object, size_t size, gfp_t flags) void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{ {
struct page *page; struct page *page;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment