Commit f67bed13 authored by Vasily Averin, committed by Andrew Morton

percpu: improve percpu_alloc_percpu event trace

Add call_site, bytes_alloc and gfp_flags fields to the output of the
percpu_alloc_percpu ftrace event:

mkdir-4393  [001]   169.334788: percpu_alloc_percpu:
  call_site=mem_cgroup_css_alloc+0xa6 reserved=0 is_atomic=0 size=2408 align=8
  base_addr=0xffffc7117fc00000 off=402176 ptr=0x3dc867a62300 bytes_alloc=14448
  gfp_flags=GFP_KERNEL_ACCOUNT

This is required to track memcg-accounted percpu allocations.

Link: https://lkml.kernel.org/r/a07be858-c8a3-7851-9086-e3262cbcf707@openvz.org
Signed-off-by: Vasily Averin <vvs@openvz.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d1ed51fc
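For reference, records like the one quoted in the commit message can be captured from tracefs once this patch is applied. A minimal userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing (the legacy /sys/kernel/debug/tracing path also works) and sufficient privileges:

/* Minimal sketch: enable the percpu_alloc_percpu event and stream
 * records like the example above from trace_pipe. */
#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[1024];

	f = fopen("/sys/kernel/tracing/events/percpu/percpu_alloc_percpu/enable", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* trace_pipe blocks until records arrive, then drains them */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}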
--- a/include/trace/events/percpu.h
+++ b/include/trace/events/percpu.h
@@ -6,15 +6,20 @@
 #define _TRACE_PERCPU_H
 
 #include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
 
 TRACE_EVENT(percpu_alloc_percpu,
 
-	TP_PROTO(bool reserved, bool is_atomic, size_t size,
-		 size_t align, void *base_addr, int off, void __percpu *ptr),
+	TP_PROTO(unsigned long call_site,
+		 bool reserved, bool is_atomic, size_t size,
+		 size_t align, void *base_addr, int off,
+		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+	TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off,
+		ptr, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
+		__field(	unsigned long,		call_site	)
 		__field(	bool,			reserved	)
 		__field(	bool,			is_atomic	)
 		__field(	size_t,			size		)
@@ -22,9 +27,11 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__field(	void *,			base_addr	)
 		__field(	int,			off		)
 		__field(	void __percpu *,	ptr		)
+		__field(	size_t,			bytes_alloc	)
+		__field(	gfp_t,			gfp_flags	)
 	),
-
 	TP_fast_assign(
+		__entry->call_site	= call_site;
 		__entry->reserved	= reserved;
 		__entry->is_atomic	= is_atomic;
 		__entry->size		= size;
@@ -32,12 +39,16 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__entry->base_addr	= base_addr;
 		__entry->off		= off;
 		__entry->ptr		= ptr;
+		__entry->bytes_alloc	= bytes_alloc;
+		__entry->gfp_flags	= gfp_flags;
 	),
 
-	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+	TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
+		  (void *)__entry->call_site,
 		  __entry->reserved, __entry->is_atomic,
 		  __entry->size, __entry->align,
-		  __entry->base_addr, __entry->off, __entry->ptr)
+		  __entry->base_addr, __entry->off, __entry->ptr,
+		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
 );
 
 TRACE_EVENT(percpu_free_percpu,
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -113,7 +113,6 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
 	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 /**
  * pcpu_obj_full_size - helper to calculate size of each accounted object
  * @size: size of area to allocate in bytes
@@ -123,13 +122,14 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  */
 static inline size_t pcpu_obj_full_size(size_t size)
 {
-	size_t extra_size;
+	size_t extra_size = 0;
 
-	extra_size = size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+#ifdef CONFIG_MEMCG_KMEM
+	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+#endif
 
 	return size * num_possible_cpus() + extra_size;
 }
-#endif	/* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_PERCPU_STATS
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1884,8 +1884,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 	kmemleak_alloc_percpu(ptr, size, gfp);
-	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
-			chunk->base_addr, off, ptr);
+	trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
+				  chunk->base_addr, off, ptr,
+				  pcpu_obj_full_size(size), gfp);
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);