Commit f129c310 authored by Andrey Konovalov, committed by Andrew Morton

kasan: introduce kasan_mempool_poison_pages

Introduce and document a kasan_mempool_poison_pages hook to be used by the
mempool code instead of kasan_poison_pages.

Compared to kasan_poison_pages, the new hook:

1. For the tag-based modes, skips checking and poisoning allocations that
   were not tagged due to sampling.

2. Checks for double-free and invalid-free bugs.

In the future, kasan_poison_pages can also be updated to handle #2, but
this is out of scope for this series.
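A usage illustration (not part of this patch): a subsystem that caches page
allocations could call the new hook on its free path roughly as follows.
The one-slot cache and the helper name are made up for the example; only
kasan_mempool_poison_pages() itself is introduced by this series.

	#include <linux/kasan.h>
	#include <linux/mm.h>

	/* Hypothetical page cache; mempool's real layout differs. */
	static bool cache_free_page(struct page *page, unsigned int order,
				    struct page **cache_slot)
	{
		/*
		 * Checks for a double-free or an invalid-free and poisons
		 * the pages so that any use while cached is reported.
		 */
		if (!kasan_mempool_poison_pages(page, order))
			return false;	/* Bug detected; do not reuse. */

		*cache_slot = page;
		return true;
	}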

Link: https://lkml.kernel.org/r/88dc7340cce28249abf789f6e0c792c317df9ba5.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 19568327
@@ -212,6 +212,29 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
@@ -326,6 +349,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}

static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
...
@@ -426,6 +426,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	/* Report a double-free or an invalid-free if detected. */
	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio;
...
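To illustrate the double-free detection this hook adds (again an
illustration, not part of the patch or of KASAN's test suite): calling the
hook twice on the same pages should make the second call fail, as the memory
is already poisoned. The demo function name is made up, and the pages are
deliberately left poisoned rather than freed.

	#include <linux/gfp.h>
	#include <linux/kasan.h>
	#include <linux/mm.h>

	static void mempool_page_double_free_demo(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 0);

		if (!page)
			return;

		/* First call: passes the checks and poisons the pages. */
		kasan_mempool_poison_pages(page, 0);

		/*
		 * Second call: KASAN is expected to report a double-free,
		 * and the hook returns false so the caller must not reuse
		 * the pages.
		 */
		kasan_mempool_poison_pages(page, 0);
	}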