Commit ed18adc1 authored by Kees Cook's avatar Kees Cook

mm: SLUB hardened usercopy support

Under CONFIG_HARDENED_USERCOPY, this adds object size checking to the
SLUB allocator to catch any copies that may span objects. Includes a
redzone handling fix discovered by Michael Ellerman.

Based on code from PaX and grsecurity.
Signed-off-by: default avatarKees Cook <keescook@chromium.org>
Tested-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Reviewed-by: default avatarLaura Abbott <labbott@redhat.com>
parent 04385fc5
@@ -1766,6 +1766,7 @@ config SLAB
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  SLUB is a slab allocator that minimizes cache line usage
 	  instead of managing queues of cached objects (SLAB approach).
......
@@ -3614,6 +3614,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Validate that a usercopy of @n bytes starting at @ptr stays within a
 * single slab object belonging to @page's cache.
 *
 * Returns NULL when the range is acceptable; otherwise returns the name
 * of the offending cache so the caller can report the violation.
 */
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page)
{
	struct kmem_cache *cache = page->slab_cache;
	size_t usable = slab_ksize(cache);
	void *base = page_address(page);
	unsigned long off;

	/* A pointer below the start of the slab page can never be valid. */
	if (ptr < base)
		return cache->name;

	/* Locate the copy's starting byte within its containing object. */
	off = (ptr - base) % cache->size;

	/*
	 * Debug caches may prepend a left redzone: a copy must not start
	 * inside it, and surviving offsets are rebased past it so they are
	 * relative to the real object payload.
	 */
	if (kmem_cache_debug(cache) && (cache->flags & SLAB_RED_ZONE)) {
		if (off < cache->red_left_pad)
			return cache->name;
		off -= cache->red_left_pad;
	}

	/* Accept only ranges that fall entirely within the usable size. */
	if (off <= usable && n <= usable - off)
		return NULL;

	return cache->name;
}
#endif /* CONFIG_HARDENED_USERCOPY */
 static size_t __ksize(const void *object)
 {
 	struct page *page;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment