Commit 505f5dcb authored by Alexander Potapenko, committed by Linus Torvalds

mm, kasan: add GFP flags to KASAN API

Add GFP flags to KASAN hooks for future patches to use.

This patch is based on the "mm: kasan: unified support for SLUB and SLAB
allocators" patch originally prepared by Dmitry Chernenkov.
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7ed2f9e6
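
For illustration, this is roughly what a slab allocation path looks like after the change, condensed from the mm/slab.c hunk below; the only difference from before is that the caller's GFP mask is now forwarded to the KASAN hook:

	void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
	{
		void *ret = slab_alloc(cachep, flags, _RET_IP_);

		/* KASAN now receives the allocation's GFP flags as well. */
		kasan_slab_alloc(cachep, ret, flags);
		trace_kmem_cache_alloc(_RET_IP_, ret,
				       cachep->object_size, cachep->size, flags);

		return ret;
	}

Call sites that have no allocation context of their own pass a fixed mask instead: GFP_KERNEL in mempool_destroy()/mempool_resize() and early_kmem_cache_node_alloc(), and GFP_NOWAIT in ksize(), as the hunks below show.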
include/linux/kasan.h
@@ -55,13 +55,14 @@ void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
-void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
 void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+		   gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
@@ -94,14 +95,16 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
 					void *object) {}
 
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
 static inline void kasan_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
-				size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
+				size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+				  gfp_t flags) {}
 
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+				    gfp_t flags) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
include/linux/slab.h
@@ -376,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 	return ret;
 }
 
@@ -387,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 #endif /* CONFIG_TRACING */
mm/kasan/kasan.c
@@ -434,9 +434,9 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 }
 #endif
 
-void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
-	kasan_kmalloc(cache, object, cache->object_size);
+	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
 void kasan_slab_free(struct kmem_cache *cache, void *object)
@@ -462,7 +462,8 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+		   gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -491,7 +492,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
-void kasan_kmalloc_large(const void *ptr, size_t size)
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 {
 	struct page *page;
 	unsigned long redzone_start;
@@ -510,7 +511,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size)
 				KASAN_PAGE_REDZONE);
 }
 
-void kasan_krealloc(const void *object, size_t size)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
@@ -520,9 +521,9 @@ void kasan_krealloc(const void *object, size_t size)
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		kasan_kmalloc_large(object, size);
+		kasan_kmalloc_large(object, size, flags);
 	else
-		kasan_kmalloc(page->slab_cache, object, size);
+		kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
 void kasan_kfree(void *ptr)
mm/mempool.c
@@ -112,12 +112,12 @@ static void kasan_poison_element(mempool_t *pool, void *element)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void kasan_unpoison_element(mempool_t *pool, void *element)
+static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
 	if (pool->alloc == mempool_alloc_slab)
-		kasan_slab_alloc(pool->pool_data, element);
+		kasan_slab_alloc(pool->pool_data, element, flags);
 	if (pool->alloc == mempool_kmalloc)
-		kasan_krealloc(element, (size_t)pool->pool_data);
+		kasan_krealloc(element, (size_t)pool->pool_data, flags);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
@@ -130,12 +130,12 @@ static void add_element(mempool_t *pool, void *element)
 	pool->elements[pool->curr_nr++] = element;
 }
 
-static void *remove_element(mempool_t *pool)
+static void *remove_element(mempool_t *pool, gfp_t flags)
 {
 	void *element = pool->elements[--pool->curr_nr];
 
 	BUG_ON(pool->curr_nr < 0);
-	kasan_unpoison_element(pool, element);
+	kasan_unpoison_element(pool, element, flags);
 	check_element(pool, element);
 	return element;
 }
@@ -154,7 +154,7 @@ void mempool_destroy(mempool_t *pool)
 		return;
 
 	while (pool->curr_nr) {
-		void *element = remove_element(pool);
+		void *element = remove_element(pool, GFP_KERNEL);
 		pool->free(element, pool->pool_data);
 	}
 	kfree(pool->elements);
@@ -250,7 +250,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr)
 	spin_lock_irqsave(&pool->lock, flags);
 	if (new_min_nr <= pool->min_nr) {
 		while (new_min_nr < pool->curr_nr) {
-			element = remove_element(pool);
+			element = remove_element(pool, GFP_KERNEL);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			pool->free(element, pool->pool_data);
 			spin_lock_irqsave(&pool->lock, flags);
@@ -347,7 +347,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		element = remove_element(pool);
+		element = remove_element(pool, gfp_temp);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		/* paired with rmb in mempool_free(), read comment there */
 		smp_wmb();
mm/slab.c
@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-	kasan_kmalloc(cachep, ret, size);
+
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 
 	return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_krealloc(objp, size);
+	kasan_krealloc(objp, size, GFP_NOWAIT);
 
 	return size;
 }
mm/slab.h
@@ -405,7 +405,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 		kmemleak_alloc_recursive(object, s->object_size, 1,
 					 s->flags, flags);
-		kasan_slab_alloc(s, object);
+		kasan_slab_alloc(s, object, flags);
 	}
 	memcg_kmem_put_cache(s);
 }
mm/slab_common.c
@@ -1013,7 +1013,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_kmem_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
-	kasan_kmalloc_large(ret, size);
+	kasan_kmalloc_large(ret, size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1192,7 +1192,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	ks = ksize(p);
 
 	if (ks >= new_size) {
-		kasan_krealloc((void *)p, new_size);
+		kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
mm/slub.c
@@ -1313,7 +1313,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
-	kasan_kmalloc_large(ptr, size);
+	kasan_kmalloc_large(ptr, size, flags);
 }
 
 static inline void kfree_hook(const void *x)
@@ -2596,7 +2596,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2624,7 +2624,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -3182,7 +3182,8 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+		      GFP_KERNEL);
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
@@ -3561,7 +3562,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
@@ -3606,7 +3607,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
@@ -3635,7 +3636,7 @@ size_t ksize(const void *object)
 	size_t size = __ksize(object);
 	/* We assume that ksize callers could use whole allocated area,
 	   so we need unpoison this area. */
-	kasan_krealloc(object, size);
+	kasan_krealloc(object, size, GFP_NOWAIT);
 	return size;
 }
 EXPORT_SYMBOL(ksize);