Commit 0f181f9f authored by Alexander Potapenko, committed by Linus Torvalds

mm/slub.c: init_on_free=1 should wipe freelist ptr for bulk allocations

slab_alloc_node() already zeroed out the freelist pointer if
init_on_free was on.  Thibaut Sautereau noticed that the same needs to
be done for kmem_cache_alloc_bulk(), which performs the allocations
separately.

kmem_cache_alloc_bulk() is currently used in two places in the kernel,
so this change is unlikely to have a major performance impact.
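
As a rough illustration (not part of this patch), a throwaway test module could
bulk-allocate from a fresh cache, bulk-free so init_on_free wipes the objects,
then bulk-allocate again and dump the first word of each recycled object: with
this fix and init_on_free=1 that word should read zero rather than a leftover
freelist pointer.  This is a sketch only; the module and cache names are made
up, and it assumes the freelist pointer sits at offset 0 of the object, which
holds for a plain cache with no constructor on kernels of this vintage but is
an internal SLUB detail.

  #include <linux/module.h>
  #include <linux/slab.h>

  #define NR_OBJS 16

  static void *objs[NR_OBJS];

  static int __init bulk_wipe_demo_init(void)
  {
          struct kmem_cache *s;
          int n, i;

          /* Plain cache, no ctor: freelist pointer lives at object offset 0. */
          s = kmem_cache_create("bulk_wipe_demo", 128, 0, 0, NULL);
          if (!s)
                  return -ENOMEM;

          /* Fill the cache, then free so init_on_free zeroes the objects. */
          n = kmem_cache_alloc_bulk(s, GFP_KERNEL, NR_OBJS, objs);
          if (n)
                  kmem_cache_free_bulk(s, n, objs);

          /* Re-allocate the recycled objects through the bulk path. */
          n = kmem_cache_alloc_bulk(s, GFP_KERNEL, NR_OBJS, objs);
          for (i = 0; i < n; i++)
                  pr_info("obj %d first word: %lx\n", i,
                          *(unsigned long *)objs[i]);

          if (n)
                  kmem_cache_free_bulk(s, n, objs);
          kmem_cache_destroy(s);
          return 0;
  }

  static void __exit bulk_wipe_demo_exit(void)
  {
  }

  module_init(bulk_wipe_demo_init);
  module_exit(bulk_wipe_demo_exit);
  MODULE_LICENSE("GPL");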

SLAB doesn't require a similar change, as auto-initialization makes the
allocator store the freelist pointers off-slab.

Link: http://lkml.kernel.org/r/20191007091605.30530-1-glider@google.com
Fixes: 6471384a ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options")
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Thibaut Sautereau <thibaut@sautereau.fr>
Reported-by: Kees Cook <keescook@chromium.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Laura Abbott <labbott@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3c52b0af
mm/slub.c
@@ -2671,6 +2671,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return p;
 }
 
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+						   void *obj)
+{
+	if (unlikely(slab_want_init_on_free(s)) && obj)
+		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2759,12 +2770,8 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
-	/*
-	 * If the object has been wiped upon free, make sure it's fully
-	 * initialized by zeroing out freelist pointer.
-	 */
-	if (unlikely(slab_want_init_on_free(s)) && object)
-		memset(object + s->offset, 0, sizeof(void *));
+
+	maybe_wipe_obj_freeptr(s, object);
 
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(object, 0, s->object_size);
@@ -3178,10 +3185,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 				goto error;
 
 			c = this_cpu_ptr(s->cpu_slab);
+			maybe_wipe_obj_freeptr(s, p[i]);
+
 			continue; /* goto for-loop */
 		}
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
+		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();