Commit e8ba2906, authored by John Keeping, committed by Arnaldo Carvalho de Melo

perf unwind: Fix libunwind when tid != pid

Commit e5adfc3e ("perf map: Synthesize maps only for thread group
leader") changed the recording side so that we no longer get mmap events
for threads other than the thread group leader (when synthesising these
events for threads which exist before perf is started).

When a file recorded after this change is loaded, the lack of mmap
records means that unwinding is not set up for any other threads.

This can be seen in a simple record/report scenario:

	perf record --call-graph=dwarf -t $TID
	perf report

If $TID is a process ID then the report will show call graphs, but if
$TID is a secondary thread the output is as if --call-graph=none was
specified.

Following the rationale in that commit, move the libunwind fields into
struct map_groups and update the libunwind functions to take this
instead of the struct thread.  This is only required for
unwind__finish_access which must now be called from map_groups__delete
and the others are changed for symmetry.

Note that unwind__get_entries keeps the thread argument since it is
required for symbol lookup and the libdw unwind provider uses the thread
ID.
Signed-off-by: John Keeping <john@metanate.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Fixes: e5adfc3e ("perf map: Synthesize maps only for thread group leader")
Link: http://lkml.kernel.org/r/20190815100146.28842-2-john@metanate.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent ab6cd0e5
...@@ -647,6 +647,7 @@ struct map_groups *map_groups__new(struct machine *machine) ...@@ -647,6 +647,7 @@ struct map_groups *map_groups__new(struct machine *machine)
void map_groups__delete(struct map_groups *mg) void map_groups__delete(struct map_groups *mg)
{ {
map_groups__exit(mg); map_groups__exit(mg);
unwind__finish_access(mg);
free(mg); free(mg);
} }
...@@ -887,7 +888,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent) ...@@ -887,7 +888,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent)
if (new == NULL) if (new == NULL)
goto out_unlock; goto out_unlock;
err = unwind__prepare_access(thread, new, NULL); err = unwind__prepare_access(mg, new, NULL);
if (err) if (err)
goto out_unlock; goto out_unlock;
......
...@@ -31,6 +31,10 @@ struct map_groups { ...@@ -31,6 +31,10 @@ struct map_groups {
struct maps maps; struct maps maps;
struct machine *machine; struct machine *machine;
refcount_t refcnt; refcount_t refcnt;
#ifdef HAVE_LIBUNWIND_SUPPORT
void *addr_space;
struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
}; };
#define KMAP_NAME_LEN 256 #define KMAP_NAME_LEN 256
......
...@@ -105,7 +105,6 @@ void thread__delete(struct thread *thread) ...@@ -105,7 +105,6 @@ void thread__delete(struct thread *thread)
} }
up_write(&thread->comm_lock); up_write(&thread->comm_lock);
unwind__finish_access(thread);
nsinfo__zput(thread->nsinfo); nsinfo__zput(thread->nsinfo);
srccode_state_free(&thread->srccode_state); srccode_state_free(&thread->srccode_state);
...@@ -252,7 +251,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str, ...@@ -252,7 +251,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
list_add(&new->list, &thread->comm_list); list_add(&new->list, &thread->comm_list);
if (exec) if (exec)
unwind__flush_access(thread); unwind__flush_access(thread->mg);
} }
thread->comm_set = true; thread->comm_set = true;
...@@ -332,7 +331,7 @@ int thread__insert_map(struct thread *thread, struct map *map) ...@@ -332,7 +331,7 @@ int thread__insert_map(struct thread *thread, struct map *map)
{ {
int ret; int ret;
ret = unwind__prepare_access(thread, map, NULL); ret = unwind__prepare_access(thread->mg, map, NULL);
if (ret) if (ret)
return ret; return ret;
...@@ -352,7 +351,7 @@ static int __thread__prepare_access(struct thread *thread) ...@@ -352,7 +351,7 @@ static int __thread__prepare_access(struct thread *thread)
down_read(&maps->lock); down_read(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) { for (map = maps__first(maps); map; map = map__next(map)) {
err = unwind__prepare_access(thread, map, &initialized); err = unwind__prepare_access(thread->mg, map, &initialized);
if (err || initialized) if (err || initialized)
break; break;
} }
......
...@@ -44,10 +44,6 @@ struct thread { ...@@ -44,10 +44,6 @@ struct thread {
struct thread_stack *ts; struct thread_stack *ts;
struct nsinfo *nsinfo; struct nsinfo *nsinfo;
struct srccode_state srccode_state; struct srccode_state srccode_state;
#ifdef HAVE_LIBUNWIND_SUPPORT
void *addr_space;
struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
bool filter; bool filter;
int filter_entry_depth; int filter_entry_depth;
}; };
......
...@@ -616,26 +616,26 @@ static unw_accessors_t accessors = { ...@@ -616,26 +616,26 @@ static unw_accessors_t accessors = {
.get_proc_name = get_proc_name, .get_proc_name = get_proc_name,
}; };
static int _unwind__prepare_access(struct thread *thread) static int _unwind__prepare_access(struct map_groups *mg)
{ {
thread->addr_space = unw_create_addr_space(&accessors, 0); mg->addr_space = unw_create_addr_space(&accessors, 0);
if (!thread->addr_space) { if (!mg->addr_space) {
pr_err("unwind: Can't create unwind address space.\n"); pr_err("unwind: Can't create unwind address space.\n");
return -ENOMEM; return -ENOMEM;
} }
unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL); unw_set_caching_policy(mg->addr_space, UNW_CACHE_GLOBAL);
return 0; return 0;
} }
static void _unwind__flush_access(struct thread *thread) static void _unwind__flush_access(struct map_groups *mg)
{ {
unw_flush_cache(thread->addr_space, 0, 0); unw_flush_cache(mg->addr_space, 0, 0);
} }
static void _unwind__finish_access(struct thread *thread) static void _unwind__finish_access(struct map_groups *mg)
{ {
unw_destroy_addr_space(thread->addr_space); unw_destroy_addr_space(mg->addr_space);
} }
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
...@@ -660,7 +660,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, ...@@ -660,7 +660,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
*/ */
if (max_stack - 1 > 0) { if (max_stack - 1 > 0) {
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL"); WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
addr_space = ui->thread->addr_space; addr_space = ui->thread->mg->addr_space;
if (addr_space == NULL) if (addr_space == NULL)
return -1; return -1;
......
...@@ -11,13 +11,13 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops; ...@@ -11,13 +11,13 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
static void unwind__register_ops(struct thread *thread, static void unwind__register_ops(struct map_groups *mg,
struct unwind_libunwind_ops *ops) struct unwind_libunwind_ops *ops)
{ {
thread->unwind_libunwind_ops = ops; mg->unwind_libunwind_ops = ops;
} }
int unwind__prepare_access(struct thread *thread, struct map *map, int unwind__prepare_access(struct map_groups *mg, struct map *map,
bool *initialized) bool *initialized)
{ {
const char *arch; const char *arch;
...@@ -28,7 +28,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map, ...@@ -28,7 +28,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
if (!dwarf_callchain_users) if (!dwarf_callchain_users)
return 0; return 0;
if (thread->addr_space) { if (mg->addr_space) {
pr_debug("unwind: thread map already set, dso=%s\n", pr_debug("unwind: thread map already set, dso=%s\n",
map->dso->name); map->dso->name);
if (initialized) if (initialized)
...@@ -37,14 +37,14 @@ int unwind__prepare_access(struct thread *thread, struct map *map, ...@@ -37,14 +37,14 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
} }
/* env->arch is NULL for live-mode (i.e. perf top) */ /* env->arch is NULL for live-mode (i.e. perf top) */
if (!thread->mg->machine->env || !thread->mg->machine->env->arch) if (!mg->machine->env || !mg->machine->env->arch)
goto out_register; goto out_register;
dso_type = dso__type(map->dso, thread->mg->machine); dso_type = dso__type(map->dso, mg->machine);
if (dso_type == DSO__TYPE_UNKNOWN) if (dso_type == DSO__TYPE_UNKNOWN)
return 0; return 0;
arch = perf_env__arch(thread->mg->machine->env); arch = perf_env__arch(mg->machine->env);
if (!strcmp(arch, "x86")) { if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT) if (dso_type != DSO__TYPE_64BIT)
...@@ -59,37 +59,37 @@ int unwind__prepare_access(struct thread *thread, struct map *map, ...@@ -59,37 +59,37 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
return 0; return 0;
} }
out_register: out_register:
unwind__register_ops(thread, ops); unwind__register_ops(mg, ops);
err = thread->unwind_libunwind_ops->prepare_access(thread); err = mg->unwind_libunwind_ops->prepare_access(mg);
if (initialized) if (initialized)
*initialized = err ? false : true; *initialized = err ? false : true;
return err; return err;
} }
void unwind__flush_access(struct thread *thread) void unwind__flush_access(struct map_groups *mg)
{ {
if (!dwarf_callchain_users) if (!dwarf_callchain_users)
return; return;
if (thread->unwind_libunwind_ops) if (mg->unwind_libunwind_ops)
thread->unwind_libunwind_ops->flush_access(thread); mg->unwind_libunwind_ops->flush_access(mg);
} }
void unwind__finish_access(struct thread *thread) void unwind__finish_access(struct map_groups *mg)
{ {
if (!dwarf_callchain_users) if (!dwarf_callchain_users)
return; return;
if (thread->unwind_libunwind_ops) if (mg->unwind_libunwind_ops)
thread->unwind_libunwind_ops->finish_access(thread); mg->unwind_libunwind_ops->finish_access(mg);
} }
int unwind__get_entries(unwind_entry_cb_t cb, void *arg, int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct thread *thread, struct thread *thread,
struct perf_sample *data, int max_stack) struct perf_sample *data, int max_stack)
{ {
if (thread->unwind_libunwind_ops) if (thread->mg->unwind_libunwind_ops)
return thread->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack); return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
return 0; return 0;
} }
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/types.h> #include <linux/types.h>
struct map; struct map;
struct map_groups;
struct perf_sample; struct perf_sample;
struct symbol; struct symbol;
struct thread; struct thread;
...@@ -19,9 +20,9 @@ struct unwind_entry { ...@@ -19,9 +20,9 @@ struct unwind_entry {
typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg); typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
struct unwind_libunwind_ops { struct unwind_libunwind_ops {
int (*prepare_access)(struct thread *thread); int (*prepare_access)(struct map_groups *mg);
void (*flush_access)(struct thread *thread); void (*flush_access)(struct map_groups *mg);
void (*finish_access)(struct thread *thread); void (*finish_access)(struct map_groups *mg);
int (*get_entries)(unwind_entry_cb_t cb, void *arg, int (*get_entries)(unwind_entry_cb_t cb, void *arg,
struct thread *thread, struct thread *thread,
struct perf_sample *data, int max_stack); struct perf_sample *data, int max_stack);
...@@ -46,20 +47,20 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, ...@@ -46,20 +47,20 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
#endif #endif
int LIBUNWIND__ARCH_REG_ID(int regnum); int LIBUNWIND__ARCH_REG_ID(int regnum);
int unwind__prepare_access(struct thread *thread, struct map *map, int unwind__prepare_access(struct map_groups *mg, struct map *map,
bool *initialized); bool *initialized);
void unwind__flush_access(struct thread *thread); void unwind__flush_access(struct map_groups *mg);
void unwind__finish_access(struct thread *thread); void unwind__finish_access(struct map_groups *mg);
#else #else
static inline int unwind__prepare_access(struct thread *thread __maybe_unused, static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
struct map *map __maybe_unused, struct map *map __maybe_unused,
bool *initialized __maybe_unused) bool *initialized __maybe_unused)
{ {
return 0; return 0;
} }
static inline void unwind__flush_access(struct thread *thread __maybe_unused) {} static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
static inline void unwind__finish_access(struct thread *thread __maybe_unused) {} static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
#endif #endif
#else #else
static inline int static inline int
...@@ -72,14 +73,14 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, ...@@ -72,14 +73,14 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
return 0; return 0;
} }
static inline int unwind__prepare_access(struct thread *thread __maybe_unused, static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
struct map *map __maybe_unused, struct map *map __maybe_unused,
bool *initialized __maybe_unused) bool *initialized __maybe_unused)
{ {
return 0; return 0;
} }
static inline void unwind__flush_access(struct thread *thread __maybe_unused) {} static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
static inline void unwind__finish_access(struct thread *thread __maybe_unused) {} static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */ #endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */ #endif /* __UNWIND_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment