Commit 7f88f88f authored by Catalin Marinas, committed by Linus Torvalds

mm: kmemleak: avoid false negatives on vmalloc'ed objects

Commit 248ac0e1 ("mm/vmalloc: remove guard page from between vmap
blocks") had the side effect of making vmap_area.va_end member point to
the next vmap_area.va_start.  This was creating an artificial reference
to vmalloc'ed objects and kmemleak was rarely reporting vmalloc() leaks.

This patch explicitly marks the part of vmap_area that contains pointers
as a kmemleak scan area and reduces the minimum ref_count to 2, as
vm_struct still contains a reference to the vmalloc'ed object.  The
kmemleak add_scan_area() function has been improved to accept a SIZE_MAX
argument covering the rest of the object (for simpler calling sites).
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 81556b02
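For illustration only (not part of the patch): a minimal sketch of how a caller can use the new SIZE_MAX convention accepted by add_scan_area() via kmemleak_scan_area(). The struct and function names below are hypothetical; the real caller introduced by this patch is alloc_vmap_area(), shown in the diff below.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>

/* Hypothetical object: only its tail members hold pointers. */
struct foo {
	unsigned long stats[32];	/* no pointers, not worth scanning */
	struct list_head list;		/* pointers that must be scanned */
	void *private;
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kmalloc(sizeof(*f), gfp);

	if (!f)
		return NULL;
	/*
	 * Restrict kmemleak scanning to the 'list' member and everything
	 * after it; add_scan_area() clamps SIZE_MAX to the end of the
	 * object, so the caller need not compute the remaining size.
	 */
	kmemleak_scan_area(&f->list, SIZE_MAX, gfp);
	return f;
}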
mm/kmemleak.c
@@ -753,7 +753,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (ptr + size > object->pointer + object->size) {
+	if (size == SIZE_MAX) {
+		size = object->pointer + object->size - ptr;
+	} else if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
mm/vmalloc.c
@@ -359,6 +359,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * Only scan the relevant parts containing pointers to other objects
+	 * to avoid false negatives.
+	 */
+	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
 retry:
 	spin_lock(&vmap_area_lock);
 	/*
@@ -1645,11 +1651,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	clear_vm_uninitialized_flag(area);
 
 	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
+	 * A ref_count = 2 is needed because vm_struct allocated in
+	 * __get_vm_area_node() contains a reference to the virtual address of
+	 * the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 2, gfp_mask);
 
 	return addr;