machine: Adopt is_lock_function() from builtin-lock.c

It is used in bpf_lock_contention.c, and builtin-lock.c will be made
conditional on CONFIG_LIBTRACEEVENT=y, so move it to machine.c, which
is always available.

This also moves the four global variables holding the sched and lock
text section start/end addresses into 'struct machine', as we can
conceivably have that info for several machine instances, say in some
'perf diff'-like tool.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
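
For illustration, here is a minimal standalone sketch of the technique the
moved helper uses: resolve the text section boundaries once, cache the
sentinel value 1 when the first lookup fails so it is never retried, then
answer queries with plain range checks. Everything in it (fake_machine,
lookup_symbol(), the addresses) is a hypothetical stand-in for perf's
machine__find_kernel_symbol_by_name() and kernel map handling, not the
actual implementation:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct text_range { uint64_t start, end; };
  struct fake_machine { struct text_range sched, lock; };

  /* Hypothetical stand-in for machine__find_kernel_symbol_by_name(). */
  static bool lookup_symbol(const char *name, uint64_t *addr)
  {
  	/* Pretend the kernel symbol table gave us these boundaries. */
  	if (!strcmp(name, "__sched_text_start")) { *addr = 0x1000; return true; }
  	if (!strcmp(name, "__sched_text_end"))   { *addr = 0x2000; return true; }
  	if (!strcmp(name, "__lock_text_start"))  { *addr = 0x3000; return true; }
  	if (!strcmp(name, "__lock_text_end"))    { *addr = 0x4000; return true; }
  	return false;
  }

  static bool is_lock_function(struct fake_machine *m, uint64_t addr)
  {
  	if (!m->sched.start) {
  		if (!lookup_symbol("__sched_text_start", &m->sched.start)) {
  			m->sched.start = 1;	/* sentinel: avoid retrying */
  			return false;
  		}
  		/* should not fail from here */
  		lookup_symbol("__sched_text_end", &m->sched.end);
  		lookup_symbol("__lock_text_start", &m->lock.start);
  		lookup_symbol("__lock_text_end", &m->lock.end);
  	}

  	if (m->sched.start == 1)	/* earlier lookup failed */
  		return false;

  	/* mutex/rwsem code is in sched text, spinlock code in lock text */
  	return (m->sched.start <= addr && addr < m->sched.end) ||
  	       (m->lock.start <= addr && addr < m->lock.end);
  }

  int main(void)
  {
  	struct fake_machine m = { { 0, 0 }, { 0, 0 } };

  	printf("%d %d\n", is_lock_function(&m, 0x1800),	/* 1: in sched text */
  			  is_lock_function(&m, 0x5000));	/* 0: in neither   */
  	return 0;
  }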
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -67,11 +67,6 @@ static enum {
 	LOCK_AGGR_CALLER,
 } aggr_mode = LOCK_AGGR_ADDR;
 
-static u64 sched_text_start;
-static u64 sched_text_end;
-static u64 lock_text_start;
-static u64 lock_text_end;
-
 static struct thread_stat *thread_stat_find(u32 tid)
 {
 	struct rb_node *node;
@@ -854,55 +849,6 @@ static int report_lock_release_event(struct evsel *evsel,
 	return 0;
 }
 
-bool is_lock_function(struct machine *machine, u64 addr)
-{
-	if (!sched_text_start) {
-		struct map *kmap;
-		struct symbol *sym;
-
-		sym = machine__find_kernel_symbol_by_name(machine,
-							  "__sched_text_start",
-							  &kmap);
-		if (!sym) {
-			/* to avoid retry */
-			sched_text_start = 1;
-			return false;
-		}
-
-		sched_text_start = kmap->unmap_ip(kmap, sym->start);
-
-		/* should not fail from here */
-		sym = machine__find_kernel_symbol_by_name(machine,
-							  "__sched_text_end",
-							  &kmap);
-		sched_text_end = kmap->unmap_ip(kmap, sym->start);
-
-		sym = machine__find_kernel_symbol_by_name(machine,
-							  "__lock_text_start",
-							  &kmap);
-		lock_text_start = kmap->unmap_ip(kmap, sym->start);
-
-		sym = machine__find_kernel_symbol_by_name(machine,
-							  "__lock_text_end",
-							  &kmap);
-		lock_text_end = kmap->unmap_ip(kmap, sym->start);
-	}
-
-	/* failed to get kernel symbols */
-	if (sched_text_start == 1)
-		return false;
-
-	/* mutex and rwsem functions are in sched text section */
-	if (sched_text_start <= addr && addr < sched_text_end)
-		return true;
-
-	/* spinlock functions are in lock text section */
-	if (lock_text_start <= addr && addr < lock_text_end)
-		return true;
-
-	return false;
-}
-
 static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
 				  char *buf, int size)
 {
@@ -961,7 +907,7 @@ static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
 			goto next;
 
 		sym = node->ms.sym;
-		if (sym && !is_lock_function(machine, node->ip)) {
+		if (sym && !machine__is_lock_function(machine, node->ip)) {
 			get_symbol_name_offset(node->ms.map, sym, node->ip,
 					       buf, size);
 			return 0;
@@ -1007,7 +953,7 @@ static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
 		if (++skip <= stack_skip)
 			goto next;
 
-		if (node->ms.sym && is_lock_function(machine, node->ip))
+		if (node->ms.sym && machine__is_lock_function(machine, node->ip))
 			goto next;
 
 		hash ^= hash_long((unsigned long)node->ip, 64);
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -153,7 +153,7 @@ int lock_contention_read(struct lock_contention *con)
 		bpf_map_lookup_elem(stack, &key, stack_trace);
 
 		/* skip lock internal functions */
-		while (is_lock_function(machine, stack_trace[idx]) &&
+		while (machine__is_lock_function(machine, stack_trace[idx]) &&
 		       idx < con->max_stack - 1)
 			idx++;
 
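The loop in this hunk walks past the leading stack frames that fall inside
the locking text sections, so the first frame outside them is reported as
the caller. A standalone sketch of that skip pattern, with in_locking_code()
as a hypothetical stand-in for machine__is_lock_function() and made-up
addresses:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-in for machine__is_lock_function(). */
  static bool in_locking_code(uint64_t addr)
  {
  	return addr >= 0x3000 && addr < 0x4000;	/* pretend lock text range */
  }

  int main(void)
  {
  	/* Innermost frames first, as in the BPF stack map. */
  	uint64_t stack_trace[] = { 0x3010, 0x3080, 0x7777, 0x8888 };
  	size_t max_stack = sizeof(stack_trace) / sizeof(stack_trace[0]);
  	size_t idx = 0;

  	/* Skip lock internal functions, but keep at least one frame. */
  	while (in_locking_code(stack_trace[idx]) && idx < max_stack - 1)
  		idx++;

  	printf("caller ip: %#lx\n", (unsigned long)stack_trace[idx]); /* 0x7777 */
  	return 0;
  }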
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -145,6 +145,4 @@ static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
 
 #endif /* HAVE_BPF_SKEL */
 
-bool is_lock_function(struct machine *machine, u64 addr);
-
 #endif /* PERF_LOCK_CONTENTION_H */
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -3336,3 +3336,43 @@ int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
 	}
 	return err;
 }
+
+bool machine__is_lock_function(struct machine *machine, u64 addr)
+{
+	if (!machine->sched.text_start) {
+		struct map *kmap;
+		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
+
+		if (!sym) {
+			/* to avoid retry */
+			machine->sched.text_start = 1;
+			return false;
+		}
+
+		machine->sched.text_start = kmap->unmap_ip(kmap, sym->start);
+
+		/* should not fail from here */
+		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
+		machine->sched.text_end = kmap->unmap_ip(kmap, sym->start);
+
+		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
+		machine->lock.text_start = kmap->unmap_ip(kmap, sym->start);
+
+		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
+		machine->lock.text_end = kmap->unmap_ip(kmap, sym->start);
+	}
+
+	/* failed to get kernel symbols */
+	if (machine->sched.text_start == 1)
+		return false;
+
+	/* mutex and rwsem functions are in sched text section */
+	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
+		return true;
+
+	/* spinlock functions are in lock text section */
+	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
+		return true;
+
+	return false;
+}
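One design note on the moved function: 0 in machine->sched.text_start means
"not resolved yet" and triggers the one-time symbol lookup, while the
sentinel value 1 (never a valid kernel text address) records that the lookup
already failed, so subsequent calls return false immediately instead of
walking the symbol table again.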
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -56,6 +56,10 @@ struct machine {
 	struct maps *kmaps;
 	struct map *vmlinux_map;
 	u64 kernel_start;
+	struct {
+		u64 text_start;
+		u64 text_end;
+	} sched, lock;
 	pid_t *current_tid;
 	size_t current_tid_sz;
 	union { /* Tool specific area */
@@ -212,6 +216,7 @@ static inline bool machine__is_host(struct machine *machine)
 	return machine ? machine->pid == HOST_KERNEL_ID : false;
 }
 
+bool machine__is_lock_function(struct machine *machine, u64 addr);
 bool machine__is(struct machine *machine, const char *arch);
 bool machine__normalized_is(struct machine *machine, const char *arch);
 int machine__nr_cpus_avail(struct machine *machine);