Commit a7101224 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, slub: move kasan_poison_slab hook before page_address

With tag based KASAN page_address() looks at the page flags to see whether
the resulting pointer needs to have a tag set.  Since we don't want to set
a tag when page_address() is called on SLAB pages, we call
page_kasan_tag_reset() in kasan_poison_slab().  However in allocate_slab()
page_address() is called before kasan_poison_slab().  Fix it by changing
the order.

[andreyknvl@google.com: fix compilation error when CONFIG_SLUB_DEBUG=n]
  Link: http://lkml.kernel.org/r/ac27cc0bbaeb414ed77bcd6671a877cf3546d56e.1550066133.git.andreyknvl@google.com
Link: http://lkml.kernel.org/r/cd895d627465a3f1c712647072d17f10883be2a1.1549921721.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgeniy Stepanov <eugenis@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent a2f77575
...@@ -1075,6 +1075,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, ...@@ -1075,6 +1075,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object); init_tracking(s, object);
} }
/*
 * Poison a freshly allocated slab page with POISON_INUSE so that
 * use-before-init of object memory is detectable.  Only acts when the
 * cache was created with SLAB_POISON; metadata access is temporarily
 * enabled so the poisoning itself is not flagged by KASAN.
 */
static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
{
	if (s->flags & SLAB_POISON) {
		metadata_access_enable();
		memset(addr, POISON_INUSE, PAGE_SIZE << order);
		metadata_access_disable();
	}
}
static inline int alloc_consistency_checks(struct kmem_cache *s, static inline int alloc_consistency_checks(struct kmem_cache *s,
struct page *page, struct page *page,
void *object, unsigned long addr) void *object, unsigned long addr)
...@@ -1330,6 +1340,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, ...@@ -1330,6 +1340,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#else /* !CONFIG_SLUB_DEBUG */ #else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s, static inline void setup_object_debug(struct kmem_cache *s,
struct page *page, void *object) {} struct page *page, void *object) {}
/* !CONFIG_SLUB_DEBUG stub: page poisoning is compiled out, so do nothing. */
static inline void setup_page_debug(struct kmem_cache *s,
void *addr, int order) {}
static inline int alloc_debug_processing(struct kmem_cache *s, static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; } struct page *page, void *object, unsigned long addr) { return 0; }
...@@ -1643,12 +1655,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) ...@@ -1643,12 +1655,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (page_is_pfmemalloc(page)) if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page); SetPageSlabPfmemalloc(page);
start = page_address(page); kasan_poison_slab(page);
if (unlikely(s->flags & SLAB_POISON)) start = page_address(page);
memset(start, POISON_INUSE, PAGE_SIZE << order);
kasan_poison_slab(page); setup_page_debug(s, start, order);
shuffle = shuffle_freelist(s, page); shuffle = shuffle_freelist(s, page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment