Commit c1da8dd5 authored by Namhyung Kim, committed by Arnaldo Carvalho de Melo

perf lock contention: Skip stack trace from BPF

Currently it collects stack traces up to the max size and then skips
entries afterwards, because we have no control over how perf callchains
are collected.  But BPF can skip entries at collection time, using a
flag to bpf_get_stackid().

Say we have max-stack=4 and stack-skip=2; we get these stack traces.

Before:                    After:

     .---> +---+ <--.           .---> +---+ <--.
     |     |   |    |           |     |   |    |
     |     +---+  usable        |     +---+    |
    max    |   |    |          max    |   |    |
   stack   +---+ <--'         stack   +---+  usable
     |     | X |                |     |   |    |
     |     +---+   skip         |     +---+    |
     |     | X |                |     |   |    |
     `---> +---+                `---> +---+ <--'   <=== collection
                                      | X |
                                      +---+   skip
                                      | X |
                                      +---+
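
For reference, bpf_get_stackid() reserves the lowest 8 bits of its flags
argument for the number of frames to skip, so a runtime skip count can be
OR'ed directly into the other flag bits.  The relevant bits, abridged from
include/uapi/linux/bpf.h:

  BPF_F_SKIP_FIELD_MASK = 0xffULL,      /* low 8 bits: frames to skip  */
  BPF_F_USER_STACK      = (1ULL << 8),  /* collect the user stack      */
  BPF_F_FAST_STACK_CMP  = (1ULL << 9),  /* compare stacks by hash only */
  BPF_F_REUSE_STACKID   = (1ULL << 10), /* overwrite on hash collision */
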
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20220912055314.744552-5-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 96532a83
@@ -93,6 +93,8 @@ int lock_contention_prepare(struct lock_contention *con)
                 bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
         }
 
+        skel->bss->stack_skip = con->stack_skip;
+
         lock_contention_bpf__attach(skel);
         return 0;
 }
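
The write to skel->bss->stack_skip above is the only new user-space plumbing:
globals of the BPF object live in its .bss map, which the libbpf skeleton
exposes directly.  A simplified sketch of the surrounding flow (not the actual
perf code; the hypothetical prepare() wrapper omits the rest of the setup done
in lock_contention_prepare()):

  #include "lock_contention.skel.h"

  static struct lock_contention_bpf *skel;

  static int prepare(int stack_skip)
  {
          skel = lock_contention_bpf__open_and_load();
          if (!skel)
                  return -1;

          /* visible to the BPF program as the global 'stack_skip' */
          skel->bss->stack_skip = stack_skip;

          if (lock_contention_bpf__attach(skel)) {
                  lock_contention_bpf__destroy(skel);
                  skel = NULL;
                  return -1;
          }
          return 0;
  }
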
@@ -127,7 +129,7 @@ int lock_contention_read(struct lock_contention *con)
         while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
                 struct map *kmap;
                 struct symbol *sym;
-                int idx;
+                int idx = 0;
 
                 bpf_map_lookup_elem(fd, &key, &data);
                 st = zalloc(sizeof(*st));
@@ -146,8 +148,7 @@ int lock_contention_read(struct lock_contention *con)
                 bpf_map_lookup_elem(stack, &key, stack_trace);
 
-                /* skip BPF + lock internal functions */
-                idx = con->stack_skip;
+                /* skip lock internal functions */
                 while (is_lock_function(machine, stack_trace[idx]) &&
                        idx < con->max_stack - 1)
                         idx++;
...
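
On the read side, the BPF program has already dropped its own frames, so the
index now starts at 0 and user space only steps over the lock-internal
functions before picking the caller's address.  A small sketch of that walk
(the wrapper function is illustrative; is_lock_function() is the perf helper
used in the hunk above):

  static u64 first_caller_addr(struct machine *machine, u64 *stack_trace,
                               int max_stack)
  {
          int idx = 0;    /* no BPF frames left to skip */

          /* step over the lock implementation itself */
          while (is_lock_function(machine, stack_trace[idx]) &&
                 idx < max_stack - 1)
                  idx++;

          return stack_trace[idx];
  }
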
@@ -72,6 +72,7 @@ struct {
 int enabled;
 int has_cpu;
 int has_task;
+int stack_skip;
 
 /* error stat */
 unsigned long lost;
@@ -117,7 +118,7 @@ int contention_begin(u64 *ctx)
         pelem->timestamp = bpf_ktime_get_ns();
         pelem->lock = (__u64)ctx[0];
         pelem->flags = (__u32)ctx[1];
-        pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+        pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP | stack_skip);
         if (pelem->stack_id < 0)
                 lost++;
...
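
Putting the two BPF-side hunks together, a minimal self-contained version of
the program would look roughly like this (map sizing and the section name are
placeholders, not the exact perf configuration):

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_STACK_TRACE);
          __uint(key_size, sizeof(__u32));
          __uint(value_size, 8 * sizeof(__u64));
          __uint(max_entries, 1024);
  } stacks SEC(".maps");

  int stack_skip;         /* written by user space via the skeleton */

  SEC("tp_btf/contention_begin")
  int contention_begin(u64 *ctx)
  {
          /* the low 8 bits of the flags carry the skip count */
          int stack_id = bpf_get_stackid(ctx, &stacks,
                                         BPF_F_FAST_STACK_CMP | stack_skip);

          if (stack_id < 0)
                  return 0;       /* the real program counts this as lost */

          /* ... record the contention event keyed by stack_id ... */
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";
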