Commit 85beb586 authored by Steven Rostedt, committed by Pekka Enberg

tracing/slab: Move kmalloc tracepoint out of inline code

The tracepoint for kmalloc is in the inlined slab code, which causes
every instance of kmalloc to carry the tracepoint.

This patch moves the tracepoint out of the inline code and into the
slab C file, which removes a large number of inlined tracepoints.
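
To make the pattern concrete, here is a minimal, hypothetical userspace sketch
(my_alloc, my_alloc_old, my_alloc_trace and the fprintf "tracepoint" are
invented for illustration and are not the kernel API): when the trace call
lives in the inline helper, every call site carries its own copy of it; when
the inline helper only forwards to a single out-of-line traced function, the
trace code is emitted once.

#include <stdio.h>
#include <stdlib.h>

/* Old layout: the trace call sits inside the inline helper, so every call
 * site that inlines my_alloc_old() also gets a copy of the trace code. */
static inline void *my_alloc_old(size_t size)
{
	void *ret = malloc(size);
	fprintf(stderr, "trace: alloc %zu -> %p\n", size, ret); /* duplicated per call site */
	return ret;
}

void *my_alloc_trace(size_t size); /* out-of-line, defined once below */

/* New layout: the inline helper only forwards; allocation and tracing live
 * together in one out-of-line function, emitted a single time. */
static inline void *my_alloc(size_t size)
{
	return my_alloc_trace(size);
}

void *my_alloc_trace(size_t size)
{
	void *ret = malloc(size);
	fprintf(stderr, "trace: alloc %zu -> %p\n", size, ret); /* one copy total */
	return ret;
}

int main(void)
{
	void *a = my_alloc_old(16); /* old pattern: trace code expanded here */
	void *b = my_alloc(32);     /* new pattern: just a call */
	void *c = my_alloc(64);     /* second call site, still only one trace body */
	free(a);
	free(b);
	free(c);
	return 0;
}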

  objdump -dr vmlinux.slab| grep 'jmpq.*<trace_kmalloc' |wc -l
213
  objdump -dr vmlinux.slab.patched| grep 'jmpq.*<trace_kmalloc' |wc -l
1

This also has a nice impact on size: the kernel text shrinks by about 52 KB.

   text	   data	    bss	    dec	    hex	filename
7023060	2121564	2482432	11627056	 b16a30	vmlinux.slab
6970579	2109772	2482432	11562783	 b06f1f	vmlinux.slab.patched
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 98072e4d
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+                                    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
         return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
                 cachep = malloc_sizes[i].cs_cachep;
 
-                ret = kmem_cache_alloc_notrace(cachep, flags);
-
-                trace_kmalloc(_THIS_IP_, ret,
-                              size, slab_buffer_size(cachep), flags);
+                ret = kmem_cache_alloc_trace(size, cachep, flags);
 
                 return ret;
         }
@@ -194,12 +192,14 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+extern void *kmem_cache_alloc_node_trace(size_t size,
+                                         struct kmem_cache *cachep,
                                            gfp_t flags,
                                            int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+kmem_cache_alloc_node_trace(size_t size,
+                            struct kmem_cache *cachep,
                               gfp_t flags,
                               int nodeid)
 {
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *cachep;
-        void *ret;
 
         if (__builtin_constant_p(size)) {
                 int i = 0;
@@ -234,13 +233,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
                 cachep = malloc_sizes[i].cs_cachep;
 
-                ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-                trace_kmalloc_node(_THIS_IP_, ret,
-                                   size, slab_buffer_size(cachep),
-                                   flags, node);
-
-                return ret;
+                return kmem_cache_alloc_node_trace(size, cachep, flags, node);
         }
         return __kmalloc_node(size, flags, node);
 }
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-        return __cache_alloc(cachep, flags, __builtin_return_address(0));
+        void *ret;
+
+        ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+        trace_kmalloc(_RET_IP_, ret,
+                      size, slab_buffer_size(cachep), flags);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 /**
@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(size_t size,
+                                  struct kmem_cache *cachep,
                                     gfp_t flags,
                                     int nodeid)
 {
-        return __cache_alloc_node(cachep, flags, nodeid,
+        void *ret;
+
+        ret = __cache_alloc_node(cachep, flags, nodeid,
                                   __builtin_return_address(0));
+        trace_kmalloc_node(_RET_IP_, ret,
+                           size, slab_buffer_size(cachep),
+                           flags, nodeid);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
         struct kmem_cache *cachep;
-        void *ret;
 
         cachep = kmem_find_general_cachep(size, flags);
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
-        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-        trace_kmalloc_node((unsigned long) caller, ret,
-                           size, cachep->buffer_size, flags, node);
-
-        return ret;
+        return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)