Commit 200072ce authored by Andrey Konovalov's avatar Andrey Konovalov Committed by Linus Torvalds

kasan: unify large kfree checks

Unify checks in kasan_kfree_large() and in kasan_slab_free_mempool() for
large allocations as it's done for small kfree() allocations.

With this change, kasan_slab_free_mempool() starts checking that the first
byte of the memory that's being freed is accessible.

Link: https://lkml.kernel.org/r/14ffc4cd867e0b1ed58f7527e3b748a1b4ad08aa.1612546384.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df54b383
...@@ -200,6 +200,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object) ...@@ -200,6 +200,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
return false; return false;
} }
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	/*
	 * Static-key fast path: skip the out-of-line check entirely when
	 * KASAN is runtime-disabled.  _RET_IP_ is captured here, in the
	 * inlined body, so reports point at the kfree() caller.
	 */
	if (!kasan_enabled())
		return;
	__kasan_kfree_large(ptr, _RET_IP_);
}
void __kasan_slab_free_mempool(void *ptr, unsigned long ip); void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr) static __always_inline void kasan_slab_free_mempool(void *ptr)
{ {
...@@ -247,13 +254,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, ...@@ -247,13 +254,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object; return (void *)object;
} }
/* Out-of-line worker; ip records the caller for KASAN reports. */
void __kasan_kfree_large(void *ptr, unsigned long ip);
/*
 * KASAN hook for freeing a page_alloc-backed (large) kmalloc object.
 * No-op unless KASAN is enabled at runtime (static-key gated).
 */
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}
/* /*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation. * the hardware tag-based mode that doesn't rely on compiler instrumentation.
...@@ -302,6 +302,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object) ...@@ -302,6 +302,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{ {
return false; return false;
} }
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {} static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags) gfp_t flags)
...@@ -322,7 +323,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, ...@@ -322,7 +323,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{ {
return (void *)object; return (void *)object;
} }
static inline void kasan_kfree_large(void *ptr) {}
static inline bool kasan_check_byte(const void *address) static inline bool kasan_check_byte(const void *address)
{ {
return true; return true;
......
...@@ -364,6 +364,31 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) ...@@ -364,6 +364,31 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
return ____kasan_slab_free(cache, object, ip, true); return ____kasan_slab_free(cache, object, ip, true);
} }
/*
 * Validate a large (page_alloc-backed) free: the pointer must be the
 * start of its head page and its first byte must be accessible
 * (i.e. not already freed / not out of the accessible tag range).
 *
 * Returns true if an invalid free was detected and reported, in which
 * case the caller must bail out; false if the free looks valid.
 *
 * Deliberately does NOT poison the memory: that is left to
 * kasan_free_pages() or kasan_slab_free_mempool().
 */
static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	bool bad_ptr = ptr != page_address(virt_to_head_page(ptr));

	if (bad_ptr || !kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}
	return false;
}
/*
 * kfree() entry point for large allocations: check-and-report only.
 * The result is discarded because the subsequent kasan_free_pages()
 * handles the poisoning regardless.
 */
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	(void)____kasan_kfree_large(ptr, ip);
}
void __kasan_slab_free_mempool(void *ptr, unsigned long ip) void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{ {
struct page *page; struct page *page;
...@@ -377,10 +402,8 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip) ...@@ -377,10 +402,8 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
* KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
*/ */
if (unlikely(!PageSlab(page))) { if (unlikely(!PageSlab(page))) {
if (ptr != page_address(page)) { if (____kasan_kfree_large(ptr, ip))
kasan_report_invalid_free(ptr, ip);
return; return;
}
kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE); kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE);
} else { } else {
____kasan_slab_free(page->slab_cache, ptr, ip, false); ____kasan_slab_free(page->slab_cache, ptr, ip, false);
...@@ -539,13 +562,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag ...@@ -539,13 +562,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
return ____kasan_kmalloc(page->slab_cache, object, size, flags); return ____kasan_kmalloc(page->slab_cache, object, size, flags);
} }
/*
 * Check a large kfree(): report an invalid free unless ptr is the
 * start of its head page.  Poisoning itself is deferred to
 * kasan_free_pages().
 */
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr == page_address(virt_to_head_page(ptr)))
		return;
	kasan_report_invalid_free(ptr, ip);
}
bool __kasan_check_byte(const void *address, unsigned long ip) bool __kasan_check_byte(const void *address, unsigned long ip)
{ {
if (!kasan_byte_accessible(address)) { if (!kasan_byte_accessible(address)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment