Commit a64b5378 authored by Kees Cook, committed by Linus Torvalds

mm/slab: sanity-check page type when looking up cache

This avoids any possible type confusion when looking up an object.  For
example, if a non-slab were to be passed to kfree(), the invalid
slab_cache pointer (i.e.  overlapped with some other value from the
struct page union) would be used for subsequent slab manipulations that
could lead to further memory corruption.
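
For illustration, a rough sketch of the overlap (simplified; this is not
the real struct page layout, the field and helper names here are examples
only):

	/*
	 * Simplified sketch: slab_cache shares storage in struct page with
	 * fields used by other page types, so reading it from a non-slab
	 * page yields whatever that other page type stored there.
	 */
	struct page_sketch {
		union {
			struct address_space *mapping;	/* e.g. pagecache pages */
			struct kmem_cache *slab_cache;	/* slab pages */
			unsigned long private_state;	/* other page types */
		};
	};

	/* The old helper trusted the field unconditionally: */
	static inline struct kmem_cache *old_virt_to_cache(const void *obj)
	{
		struct page *page = virt_to_head_page(obj);

		return page->slab_cache;	/* garbage if !PageSlab(page) */
	}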

Since the page is already in cache, adding the PageSlab() check will
have nearly zero cost, so add a check and WARN() to virt_to_cache().
It additionally replaces the open-coded lookup in cache_from_obj() with a
call to virt_to_cache().  To support the new failure mode, this also
updates all callers of virt_to_cache() and cache_from_obj() to handle a
NULL cache pointer return value (though note that several already handle
this case gracefully).

[dan.carpenter@oracle.com: restore IRQs in kfree()]
  Link: http://lkml.kernel.org/r/20190613065637.GE16334@mwanda
Link: http://lkml.kernel.org/r/20190530045017.15252-3-keescook@chromium.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Alexander Popov <alex.popov@linux.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 598a0717
mm/slab.c
@@ -371,12 +371,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
-	struct page *page = virt_to_head_page(obj);
-	return page->slab_cache;
-}
-
 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 				 unsigned int idx)
 {
@@ -3715,6 +3709,8 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 			s = virt_to_cache(objp);
 		else
 			s = cache_from_obj(orig_s, objp);
+		if (!s)
+			continue;
 
 		debug_check_no_locks_freed(objp, s->object_size);
 		if (!(s->flags & SLAB_DEBUG_OBJECTS))
@@ -3749,6 +3745,10 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
+	if (!c) {
+		local_irq_restore(flags);
+		return;
+	}
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
@@ -4219,13 +4219,15 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
  */
 size_t ksize(const void *objp)
 {
+	struct kmem_cache *c;
 	size_t size;
 
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	size = virt_to_cache(objp)->object_size;
+	c = virt_to_cache(objp);
+	size = c ? c->object_size : 0;
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
mm/slab.h
@@ -350,10 +350,20 @@ static inline void memcg_link_cache(struct kmem_cache *s)
 
 #endif /* CONFIG_MEMCG_KMEM */
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page;
+
+	page = virt_to_head_page(obj);
+	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
+					__func__))
+		return NULL;
+	return page->slab_cache;
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
-	struct page *page;
 
 	/*
 	 * When kmemcg is not being used, both assignments should return the
@@ -367,9 +377,8 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
 		return s;
 
-	page = virt_to_head_page(x);
-	cachep = page->slab_cache;
-	WARN_ONCE(!slab_equal_or_root(cachep, s),
+	cachep = virt_to_cache(x);
+	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
 		  "%s: Wrong slab cache. %s but object is from %s\n",
 		  __func__, s->name, cachep->name);
 	return cachep;
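
As a usage illustration (not part of the commit), a hypothetical misuse
that the new check catches: freeing a page-allocator page through kfree()
now trips the WARN_ONCE() in virt_to_cache() and kfree() returns early
instead of dereferencing a bogus slab_cache pointer. The sketch assumes a
kernel context with <linux/slab.h> and <linux/gfp.h>; demo_bad_kfree() is
a made-up name:

	static void demo_bad_kfree(void)
	{
		/* A page-allocator page: PageSlab() is false for it. */
		unsigned long addr = __get_free_page(GFP_KERNEL);

		if (!addr)
			return;

		kfree((void *)addr);	/* with this patch: one-time WARN, then early return */
		free_page(addr);	/* the correct way to free this page */
	}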