Commit f4e6e289 authored by Kees Cook

usercopy: Include offset in hardened usercopy report

This refactors the hardened usercopy code so that failure reporting can
happen within the checking functions instead of at the top level. This
simplifies the return value handling and allows more details and offsets
to be included in the report. Having the offset can be much more helpful
in understanding hardened usercopy bugs.
Signed-off-by: Kees Cook <keescook@chromium.org>
parent b394d468
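
The pattern change at the core of this diff is easiest to see side by side: checkers used to return a cache-name string on failure (NULL on success) and let the top-level caller report, which discarded the offset; now they return void and report at the point of detection. The sketch below illustrates the two styles. It is illustrative only: check_old()/check_new() are hypothetical names, and the usercopy_abort() stub merely approximates the kernel helper added in parent commit b394d468.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's usercopy_abort(); the real helper
 * BUG()s rather than exiting, and its exact message format may differ. */
static void usercopy_abort(const char *name, const char *detail, bool to_user,
			   unsigned long offset, unsigned long len)
{
	fprintf(stderr,
		"usercopy: kernel memory %s attempt detected %s %s '%s' (offset %lu, size %lu)!\n",
		to_user ? "exposure" : "overwrite",
		to_user ? "from" : "to",
		name, detail ? detail : "?", offset, len);
	exit(1);
}

/* Old style: return the cache name on failure, NULL on success. The caller
 * reports, and the offset computed here never reaches the report. */
static const char *check_old(unsigned long offset, unsigned long n,
			     unsigned long object_size, const char *cache_name)
{
	if (offset <= object_size && n <= object_size - offset)
		return NULL;	/* check passed */
	return cache_name;
}

/* New style: return void and report at the point of detection, so the
 * offset and copy size land in the report. */
static void check_new(unsigned long offset, unsigned long n,
		      unsigned long object_size, const char *cache_name,
		      bool to_user)
{
	if (offset <= object_size && n <= object_size - offset)
		return;		/* check passed */
	usercopy_abort("SLAB object", cache_name, to_user, offset, n);
}

int main(void)
{
	const char *err = check_old(0, 32, 128, "kmalloc-128");

	printf("old-style check: %s\n", err ? err : "passed");

	/* A 16-byte copy starting 120 bytes into a 128-byte object overflows:
	 * the new style can now say "offset 120, size 16" in the report. */
	check_new(120, 16, 128, "kmalloc-128", true);
	return 0;
}
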
include/linux/slab.h
@@ -167,15 +167,11 @@ void kzfree(const void *);
 size_t ksize(const void *);

 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page);
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user);
 #else
-static inline const char *__check_heap_object(const void *ptr,
-					      unsigned long n,
-					      struct page *page)
-{
-	return NULL;
-}
+static inline void __check_heap_object(const void *ptr, unsigned long n,
+				       struct page *page, bool to_user) { }
 #endif

 /*
mm/slab.c
@@ -4397,8 +4397,8 @@ module_init(slab_proc_init);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -4414,9 +4414,9 @@ const char *__check_heap_object(const void *ptr, unsigned long n,

 	/* Allow address range falling entirely within object size. */
 	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
-		return NULL;
+		return;

-	return cachep->name;
+	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
mm/slub.c
@@ -3818,8 +3818,8 @@ EXPORT_SYMBOL(__kmalloc_node);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *s;
 	unsigned long offset;
@@ -3831,7 +3831,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,

 	/* Reject impossible pointers. */
 	if (ptr < page_address(page))
-		return s->name;
+		usercopy_abort("SLUB object not in SLUB page?!", NULL,
+			       to_user, 0, n);

 	/* Find offset within object. */
 	offset = (ptr - page_address(page)) % s->size;
@@ -3839,15 +3840,16 @@ const char *__check_heap_object(const void *ptr, unsigned long n,

 	/* Adjust for redzone and reject if within the redzone. */
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
 		if (offset < s->red_left_pad)
-			return s->name;
+			usercopy_abort("SLUB object in left red zone",
+				       s->name, to_user, offset, n);
 		offset -= s->red_left_pad;
 	}

 	/* Allow address range falling entirely within object size. */
 	if (offset <= object_size && n <= object_size - offset)
-		return NULL;
+		return;

-	return s->name;
+	usercopy_abort("SLUB object", s->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
mm/usercopy.c
@@ -86,10 +86,10 @@ void __noreturn usercopy_abort(const char *name, const char *detail,
 }

 /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */
-static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
-		     unsigned long high)
+static bool overlaps(const unsigned long ptr, unsigned long n,
+		     unsigned long low, unsigned long high)
 {
-	unsigned long check_low = (uintptr_t)ptr;
+	const unsigned long check_low = ptr;
 	unsigned long check_high = check_low + n;

 	/* Does not overlap if entirely above or entirely below. */
@@ -100,15 +100,15 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
 }

 /* Is this address range in the kernel text area? */
-static inline const char *check_kernel_text_object(const void *ptr,
-						   unsigned long n)
+static inline void check_kernel_text_object(const unsigned long ptr,
+					    unsigned long n, bool to_user)
 {
 	unsigned long textlow = (unsigned long)_stext;
 	unsigned long texthigh = (unsigned long)_etext;
 	unsigned long textlow_linear, texthigh_linear;

 	if (overlaps(ptr, n, textlow, texthigh))
-		return "<kernel text>";
+		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

 	/*
 	 * Some architectures have virtual memory mappings with a secondary
@@ -121,32 +121,30 @@ static inline const char *check_kernel_text_object(const void *ptr,
 	textlow_linear = (unsigned long)lm_alias(textlow);
 	/* No different mapping: we're done. */
 	if (textlow_linear == textlow)
-		return NULL;
+		return;

 	/* Check the secondary mapping... */
 	texthigh_linear = (unsigned long)lm_alias(texthigh);
 	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
-		return "<linear kernel text>";
-
-	return NULL;
+		usercopy_abort("linear kernel text", NULL, to_user,
+			       ptr - textlow_linear, n);
 }

-static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
+				       bool to_user)
 {
 	/* Reject if object wraps past end of memory. */
-	if ((unsigned long)ptr + n < (unsigned long)ptr)
-		return "<wrapped address>";
+	if (ptr + n < ptr)
+		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

 	/* Reject if NULL or ZERO-allocation. */
 	if (ZERO_OR_NULL_PTR(ptr))
-		return "<null>";
-
-	return NULL;
+		usercopy_abort("null address", NULL, to_user, ptr, n);
 }

 /* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline const char *check_page_span(const void *ptr, unsigned long n,
-					  struct page *page, bool to_user)
+static inline void check_page_span(const void *ptr, unsigned long n,
+				   struct page *page, bool to_user)
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
@@ -163,28 +161,28 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
 	if (ptr >= (const void *)__start_rodata &&
 	    end <= (const void *)__end_rodata) {
 		if (!to_user)
-			return "<rodata>";
-		return NULL;
+			usercopy_abort("rodata", NULL, to_user, 0, n);
+		return;
 	}

 	/* Allow kernel data region (if not marked as Reserved). */
 	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return NULL;
+		return;

 	/* Allow kernel bss region (if not marked as Reserved). */
 	if (ptr >= (const void *)__bss_start &&
 	    end <= (const void *)__bss_stop)
-		return NULL;
+		return;

 	/* Is the object wholly within one base page? */
 	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return NULL;
+		return;

 	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
-		return NULL;
+		return;

 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
@@ -194,36 +192,37 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		return "<spans multiple pages>";
+		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			return "<spans Reserved and non-Reserved pages>";
+			usercopy_abort("spans Reserved and non-Reserved pages",
+				       NULL, to_user, 0, n);
 		if (is_cma && !is_migrate_cma_page(page))
-			return "<spans CMA and non-CMA pages>";
+			usercopy_abort("spans CMA and non-CMA pages", NULL,
+				       to_user, 0, n);
 	}
 #endif
-
-	return NULL;
 }

-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+static inline void check_heap_object(const void *ptr, unsigned long n,
+				     bool to_user)
 {
 	struct page *page;

 	if (!virt_addr_valid(ptr))
-		return NULL;
+		return;

 	page = virt_to_head_page(ptr);

-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/* Verify object does not incorrectly span multiple pages. */
-	return check_page_span(ptr, n, page, to_user);
+	if (PageSlab(page)) {
+		/* Check slab allocator for flags and size. */
+		__check_heap_object(ptr, n, page, to_user);
+	} else {
+		/* Verify object does not incorrectly span multiple pages. */
+		check_page_span(ptr, n, page, to_user);
+	}
 }

 /*
@@ -234,21 +233,15 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
  */
 void __check_object_size(const void *ptr, unsigned long n, bool to_user)
 {
-	const char *err;
-
 	/* Skip all tests if size is zero. */
 	if (!n)
 		return;

 	/* Check for invalid addresses. */
-	err = check_bogus_address(ptr, n);
-	if (err)
-		goto report;
+	check_bogus_address((const unsigned long)ptr, n, to_user);

 	/* Check for bad heap object. */
-	err = check_heap_object(ptr, n, to_user);
-	if (err)
-		goto report;
+	check_heap_object(ptr, n, to_user);

 	/* Check for bad stack object. */
 	switch (check_stack_object(ptr, n)) {
@@ -264,16 +257,10 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
 	 */
 		return;
 	default:
-		err = "<process stack>";
-		goto report;
+		usercopy_abort("process stack", NULL, to_user, 0, n);
 	}

 	/* Check for object in kernel to avoid text exposure. */
-	err = check_kernel_text_object(ptr, n);
-	if (!err)
-		return;
-
-report:
-	usercopy_abort(err, NULL, to_user, 0, n);
+	check_kernel_text_object((const unsigned long)ptr, n, to_user);
 }
 EXPORT_SYMBOL(__check_object_size);
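
For reference, every converted call site funnels into the reporting helper introduced by parent commit b394d468. Its opening parameters are visible in the mm/usercopy.c hunk header above; the rest can be read off the call sites. A reconstructed prototype follows (an inference from this diff, not a quotation of the header):

/* name    - region type ("SLAB object", "kernel text", "process stack", ...)
 * detail  - optional specifics, such as the slab cache name
 * to_user - copy direction: exposure (to userspace) vs. overwrite (from it)
 * offset  - where in the object the bad range starts (the new information)
 * len     - size of the attempted copy
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);

Because the checkers call this directly instead of returning a bare cache-name string, the offset computed at the point of failure survives into the report.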