Commit dcc7cd01 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6

* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: fix kconfig for crc32 build error
  kmemleak: Reduce the false positives by checking for modified objects
  kmemleak: Show the age of an unreferenced object
  kmemleak: Release the object lock before calling put_object()
  kmemleak: Scan the _ftrace_events section in modules
  kmemleak: Simplify the kmemleak_scan_area() function prototype
  kmemleak: Do not use off-slab management with SLAB_NOLEAKTRACE
parents bf931a01 b60e26a2
...@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset, ...@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
size_t size) __ref; size_t size) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref; extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, unsigned long offset, extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
size_t length, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref; extern void kmemleak_no_scan(const void *ptr) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
...@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr) ...@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
static inline void kmemleak_ignore(const void *ptr) static inline void kmemleak_ignore(const void *ptr)
{ {
} }
static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
size_t length, gfp_t gfp)
{ {
} }
static inline void kmemleak_erase(void **ptr) static inline void kmemleak_erase(void **ptr)
......
...@@ -1910,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, ...@@ -1910,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
unsigned int i; unsigned int i;
/* only scan the sections containing data */ /* only scan the sections containing data */
kmemleak_scan_area(mod->module_core, (unsigned long)mod - kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
(unsigned long)mod->module_core,
sizeof(struct module), GFP_KERNEL);
for (i = 1; i < hdr->e_shnum; i++) { for (i = 1; i < hdr->e_shnum; i++) {
if (!(sechdrs[i].sh_flags & SHF_ALLOC)) if (!(sechdrs[i].sh_flags & SHF_ALLOC))
...@@ -1921,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, ...@@ -1921,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
&& strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
continue; continue;
kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr - kmemleak_scan_area((void *)sechdrs[i].sh_addr,
(unsigned long)mod->module_core,
sechdrs[i].sh_size, GFP_KERNEL); sechdrs[i].sh_size, GFP_KERNEL);
} }
} }
...@@ -2250,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod, ...@@ -2250,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod,
"_ftrace_events", "_ftrace_events",
sizeof(*mod->trace_events), sizeof(*mod->trace_events),
&mod->num_trace_events); &mod->num_trace_events);
/*
* This section contains pointers to allocated objects in the trace
* code and not scanning it leads to false positives.
*/
kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
mod->num_trace_events, GFP_KERNEL);
#endif #endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD #ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */ /* sechdrs[0].sh_size is always zero */
......
...@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK ...@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
select DEBUG_FS if SYSFS select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS select KALLSYMS
select CRC32
help help
Say Y here if you want to enable the memory leak Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way detector. The memory allocation/freeing is traced in a way
......
This diff is collapsed.
...@@ -2275,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align, ...@@ -2275,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* /*
* Determine if the slab management is 'on' or 'off' slab. * Determine if the slab management is 'on' or 'off' slab.
* (bootstrapping cannot cope with offslab caches so don't do * (bootstrapping cannot cope with offslab caches so don't do
* it too early on.) * it too early on. Always use on-slab management when
* SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
*/ */
if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
!(flags & SLAB_NOLEAKTRACE))
/* /*
* Size is large, assume best to place the slab management obj * Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs). * off-slab (should allow better packing of objs).
...@@ -2596,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, ...@@ -2596,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
* kmemleak does not treat the ->s_mem pointer as a reference * kmemleak does not treat the ->s_mem pointer as a reference
* to the object. Otherwise we will not report the leak. * to the object. Otherwise we will not report the leak.
*/ */
kmemleak_scan_area(slabp, offsetof(struct slab, list), kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
sizeof(struct list_head), local_flags); local_flags);
if (!slabp) if (!slabp)
return NULL; return NULL;
} else { } else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment