Commit f1942b96 authored by Ingo Molnar's avatar Ingo Molnar

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core refactorings and improvements from Arnaldo Carvalho de Melo:

User visible changes:

  - Add hint for 'Too many events are opened.' error message (Jiri Olsa)

Infrastructure changes:

  - Protect accesses to the map rbtrees with a lock, and reference-count
    'struct map', reducing memory usage as maps that are no longer used
    get freed. The 'dso' struct is next in line. (Arnaldo Carvalho de Melo)

  - Annotation and branch related option parsing refactorings to
    share code with upcoming patches (Andi Kleen)
Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents 09a216ea f00898f4
......@@ -59,6 +59,10 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
(al->sym == NULL ||
strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
/* We're only interested in a symbol named sym_hist_filter */
/*
* FIXME: why isn't this done in the symbol_filter when loading
* the DSO?
*/
if (al->sym != NULL) {
rb_erase(&al->sym->rb_node,
&al->map->dso->symbols[al->map->type]);
......
......@@ -28,6 +28,7 @@
#include "util/thread_map.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/parse-branch-options.h"
#include <unistd.h>
#include <sched.h>
......@@ -751,94 +752,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
return status;
}
#define BRANCH_OPT(n, m) \
{ .name = n, .mode = (m) }
#define BRANCH_END { .name = NULL }
struct branch_mode {
const char *name;
int mode;
};
static const struct branch_mode branch_modes[] = {
BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
BRANCH_END
};
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
(PERF_SAMPLE_BRANCH_USER |\
PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
uint64_t *mode = (uint64_t *)opt->value;
const struct branch_mode *br;
char *s, *os = NULL, *p;
int ret = -1;
if (unset)
return 0;
/*
* cannot set it twice, -b + --branch-filter for instance
*/
if (*mode)
return -1;
/* str may be NULL in case no arg is passed to -b */
if (str) {
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
for (br = branch_modes; br->name; br++) {
if (!strcasecmp(s, br->name))
break;
}
if (!br->name) {
ui__warning("unknown branch filter %s,"
" check man page\n", s);
goto error;
}
*mode |= br->mode;
if (!p)
break;
s = p + 1;
}
}
ret = 0;
/* default to any branch */
if ((*mode & ~ONLY_PLM) == 0) {
*mode = PERF_SAMPLE_BRANCH_ANY;
}
error:
free(os);
return ret;
}
static void callchain_debug(void)
{
static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
......
......@@ -26,7 +26,7 @@ int test__vmlinux_matches_kallsyms(void)
struct map *kallsyms_map, *vmlinux_map, *map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
struct rb_root *maps = &vmlinux.kmaps.maps[type];
struct maps *maps = &vmlinux.kmaps.maps[type];
u64 mem_start, mem_end;
/*
......
......@@ -75,6 +75,7 @@ libperf-$(CONFIG_X86) += tsc.o
libperf-y += cloexec.o
libperf-y += thread-stack.o
libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-y += parse-branch-options.o
libperf-$(CONFIG_LIBELF) += symbol-elf.o
libperf-$(CONFIG_LIBELF) += probe-event.o
......
......@@ -506,6 +506,17 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
return 0;
}
/*
 * Return the annotation area of @sym, allocating its histogram storage
 * on first use. Returns NULL if that allocation fails.
 */
static struct annotation *symbol__get_annotation(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src != NULL)
		return notes;

	return symbol__alloc_hist(sym) < 0 ? NULL : notes;
}
static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
int evidx, u64 addr)
{
......@@ -513,13 +524,9 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
if (sym == NULL)
return 0;
notes = symbol__annotation(sym);
if (notes->src == NULL) {
if (symbol__alloc_hist(sym) < 0)
notes = symbol__get_annotation(sym);
if (notes == NULL)
return -ENOMEM;
}
return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
}
......
......@@ -331,7 +331,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
int rc = 0;
struct map *pos;
struct map_groups *kmaps = &machine->kmaps;
struct rb_root *maps = &kmaps->maps[MAP__FUNCTION];
struct maps *maps = &kmaps->maps[MAP__FUNCTION];
union perf_event *event = zalloc((sizeof(event->mmap) +
machine->id_hdr_size));
if (event == NULL) {
......
......@@ -2149,7 +2149,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
case EMFILE:
return scnprintf(msg, size, "%s",
"Too many events are opened.\n"
"Try again after reducing the number of events.");
"Probably the maximum number of open file descriptors has been reached.\n"
"Hint: Try again after reducing the number of events.\n"
"Hint: Try increasing the limit with 'ulimit -n <limit>'");
case ENODEV:
if (target->cpu_list)
return scnprintf(msg, size, "%s",
......
......@@ -759,7 +759,6 @@ void machine__destroy_kernel_maps(struct machine *machine)
kmap->ref_reloc_sym = NULL;
}
map__delete(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL;
}
}
......@@ -1247,6 +1246,7 @@ int machine__process_mmap2_event(struct machine *machine,
thread__insert_map(thread, map);
thread__put(thread);
map__put(map);
return 0;
out_problem_map:
......@@ -1297,6 +1297,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
thread__insert_map(thread, map);
thread__put(thread);
map__put(map);
return 0;
out_problem_map:
......
......@@ -16,6 +16,8 @@
#include "machine.h"
#include <linux/string.h>
static void __maps__insert(struct maps *maps, struct map *map);
const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
[MAP__VARIABLE] = "Variables",
......@@ -137,6 +139,7 @@ void map__init(struct map *map, enum map_type type,
map->groups = NULL;
map->referenced = false;
map->erange_warned = false;
atomic_set(&map->refcnt, 1);
}
struct map *map__new(struct machine *machine, u64 start, u64 len,
......@@ -223,9 +226,16 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
/*
 * Immediately free @map. The map must already be unlinked from any
 * maps rbtree; callers normally drop their reference with map__put()
 * instead of calling this directly.
 */
void map__delete(struct map *map)
{
	/* Catch freeing a map that is still linked in an rbtree. */
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	free(map);
}
void map__put(struct map *map)
{
if (map && atomic_dec_and_test(&map->refcnt))
map__delete(map);
}
void map__fixup_start(struct map *map)
{
struct rb_root *symbols = &map->dso->symbols[map->type];
......@@ -418,48 +428,61 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
return ip + map->reloc;
}
/* Initialize @maps: empty rbtree, its rwlock and the removed_maps list. */
static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
	INIT_LIST_HEAD(&maps->removed_maps);
}
void map_groups__init(struct map_groups *mg, struct machine *machine)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i) {
mg->maps[i] = RB_ROOT;
INIT_LIST_HEAD(&mg->removed_maps[i]);
maps__init(&mg->maps[i]);
}
mg->machine = machine;
atomic_set(&mg->refcnt, 1);
}
static void maps__delete(struct rb_root *maps)
static void __maps__purge(struct maps *maps)
{
struct rb_node *next = rb_first(maps);
struct rb_root *root = &maps->entries;
struct rb_node *next = rb_first(root);
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, maps);
map__delete(pos);
rb_erase_init(&pos->rb_node, root);
map__put(pos);
}
}
static void maps__delete_removed(struct list_head *maps)
static void __maps__purge_removed_maps(struct maps *maps)
{
struct map *pos, *n;
list_for_each_entry_safe(pos, n, maps, node) {
list_del(&pos->node);
map__delete(pos);
list_for_each_entry_safe(pos, n, &maps->removed_maps, node) {
list_del_init(&pos->node);
map__put(pos);
}
}
/*
 * Drop every map held by @maps, both the ones still in the rbtree and
 * the ones parked on the removed_maps list, under the writer lock.
 */
static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	__maps__purge_removed_maps(maps);
	pthread_rwlock_unlock(&maps->lock);
}
void map_groups__exit(struct map_groups *mg)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i) {
maps__delete(&mg->maps[i]);
maps__delete_removed(&mg->removed_maps[i]);
}
for (i = 0; i < MAP__NR_TYPES; ++i)
maps__exit(&mg->maps[i]);
}
bool map_groups__empty(struct map_groups *mg)
......@@ -469,7 +492,7 @@ bool map_groups__empty(struct map_groups *mg)
for (i = 0; i < MAP__NR_TYPES; ++i) {
if (maps__first(&mg->maps[i]))
return false;
if (!list_empty(&mg->removed_maps[i]))
if (!list_empty(&mg->maps[i].removed_maps))
return false;
}
......@@ -521,20 +544,28 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
struct map **mapp,
symbol_filter_t filter)
{
struct maps *maps = &mg->maps[type];
struct symbol *sym;
struct rb_node *nd;
for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
pthread_rwlock_rdlock(&maps->lock);
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
sym = map__find_symbol_by_name(pos, name, filter);
if (sym == NULL)
continue;
if (mapp != NULL)
*mapp = pos;
return sym;
goto out;
}
return NULL;
sym = NULL;
out:
pthread_rwlock_unlock(&maps->lock);
return sym;
}
int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
......@@ -554,25 +585,35 @@ int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
return ams->sym ? 0 : -1;
}
size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
FILE *fp)
static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
size_t printed = 0;
struct rb_node *nd;
for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
pthread_rwlock_rdlock(&maps->lock);
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp);
if (verbose > 2) {
printed += dso__fprintf(pos->dso, type, fp);
printed += dso__fprintf(pos->dso, pos->type, fp);
printed += fprintf(fp, "--\n");
}
}
pthread_rwlock_unlock(&maps->lock);
return printed;
}
/*
 * Print the map type header followed by every map of @type in @mg,
 * returning the total number of characters written to @fp.
 */
size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
				  FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);

	printed += maps__fprintf(&mg->maps[type], fp);
	return printed;
}
static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
{
size_t printed = 0, i;
......@@ -587,7 +628,7 @@ static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
struct map *pos;
size_t printed = 0;
list_for_each_entry(pos, &mg->removed_maps[type], node) {
list_for_each_entry(pos, &mg->maps[type].removed_maps, node) {
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp);
if (verbose > 1) {
......@@ -614,13 +655,17 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
return printed + map_groups__fprintf_removed_maps(mg, fp);
}
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
FILE *fp)
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
struct rb_root *root = &mg->maps[map->type];
struct rb_node *next = rb_first(root);
struct rb_root *root;
struct rb_node *next;
int err = 0;
pthread_rwlock_wrlock(&maps->lock);
root = &maps->entries;
next = rb_first(root);
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node);
next = rb_next(&pos->rb_node);
......@@ -634,7 +679,7 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
map__fprintf(pos, fp);
}
rb_erase(&pos->rb_node, root);
rb_erase_init(&pos->rb_node, root);
/*
* Now check if we need to create new maps for areas not
* overlapped by the new map:
......@@ -644,11 +689,11 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
if (before == NULL) {
err = -ENOMEM;
goto move_map;
goto put_map;
}
before->end = map->start;
map_groups__insert(mg, before);
__maps__insert(maps, before);
if (verbose >= 2)
map__fprintf(before, fp);
}
......@@ -658,28 +703,37 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
if (after == NULL) {
err = -ENOMEM;
goto move_map;
goto put_map;
}
after->start = map->end;
map_groups__insert(mg, after);
__maps__insert(maps, after);
if (verbose >= 2)
map__fprintf(after, fp);
}
move_map:
put_map:
/*
* If we have references, just move them to a separate list.
*/
if (pos->referenced)
list_add_tail(&pos->node, &mg->removed_maps[map->type]);
list_add_tail(&pos->node, &maps->removed_maps);
else
map__delete(pos);
map__put(pos);
if (err)
return err;
goto out;
}
return 0;
err = 0;
out:
pthread_rwlock_unlock(&maps->lock);
return err;
}
/*
 * Remove or split existing maps in @mg that overlap the address range
 * of @map, delegating to maps__fixup_overlappings() on the tree for
 * @map's type. Returns 0 on success, -ENOMEM if splitting a map needed
 * an allocation that failed.
 */
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}
/*
......@@ -688,21 +742,28 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
int map_groups__clone(struct map_groups *mg,
struct map_groups *parent, enum map_type type)
{
int err = -ENOMEM;
struct map *map;
struct rb_root *maps = &parent->maps[type];
struct maps *maps = &parent->maps[type];
pthread_rwlock_rdlock(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
struct map *new = map__clone(map);
if (new == NULL)
return -ENOMEM;
goto out_unlock;
map_groups__insert(mg, new);
}
return 0;
err = 0;
out_unlock:
pthread_rwlock_unlock(&maps->lock);
return err;
}
void maps__insert(struct rb_root *maps, struct map *map)
static void __maps__insert(struct maps *maps, struct map *map)
{
struct rb_node **p = &maps->rb_node;
struct rb_node **p = &maps->entries.rb_node;
struct rb_node *parent = NULL;
const u64 ip = map->start;
struct map *m;
......@@ -717,20 +778,38 @@ void maps__insert(struct rb_root *maps, struct map *map)
}
rb_link_node(&map->rb_node, parent, p);
rb_insert_color(&map->rb_node, maps);
rb_insert_color(&map->rb_node, &maps->entries);
map__get(map);
}
void maps__remove(struct rb_root *maps, struct map *map)
/*
 * Insert @map into @maps under the writer lock; __maps__insert() grabs
 * a reference on the map for the tree.
 *
 * Fix: a stray "rb_erase(&map->rb_node, maps);" line — residue from the
 * removed old maps__remove() body interleaved by the diff rendering —
 * was left inside this function. It passes a 'struct maps *' where
 * rb_erase() expects a 'struct rb_root *' (a type error) and would
 * erase a node not yet in any tree. Removed.
 */
void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}
struct map *maps__find(struct rb_root *maps, u64 ip)
/*
 * Unlink @map from @maps' rbtree and drop the tree's reference.
 * Caller must hold maps->lock for writing.
 *
 * Fix: two stray declarations ("struct rb_node **p = &maps->rb_node;"
 * and "struct rb_node *parent = NULL;") — residue from the removed old
 * maps__find() body interleaved by the diff rendering — were left in
 * this function. 'struct maps' has no rb_node member (the tree root is
 * maps->entries), so they could not compile and were unused. Removed.
 */
static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}
/* Locked wrapper around __maps__remove(). */
void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}
struct map *maps__find(struct maps *maps, u64 ip)
{
struct rb_node **p, *parent = NULL;
struct map *m;
pthread_rwlock_rdlock(&maps->lock);
p = &maps->entries.rb_node;
while (*p != NULL) {
parent = *p;
m = rb_entry(parent, struct map, rb_node);
......@@ -739,15 +818,18 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
else if (ip >= m->end)
p = &(*p)->rb_right;
else
return m;
goto out;
}
return NULL;
m = NULL;
out:
pthread_rwlock_unlock(&maps->lock);
return m;
}
struct map *maps__first(struct rb_root *maps)
struct map *maps__first(struct maps *maps)
{
struct rb_node *first = rb_first(maps);
struct rb_node *first = rb_first(&maps->entries);
if (first)
return rb_entry(first, struct map, rb_node);
......
......@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <linux/types.h>
......@@ -51,6 +52,7 @@ struct map {
struct dso *dso;
struct map_groups *groups;
atomic_t refcnt;
};
struct kmap {
......@@ -58,9 +60,14 @@ struct kmap {
struct map_groups *kmaps;
};
/*
 * A lock protected rbtree of maps ordered by start address, plus the
 * list of maps that were removed from the tree while still referenced
 * elsewhere (see the fixup_overlappings path).
 */
struct maps {
	struct rb_root	 entries;	/* rbtree of struct map, keyed by map->start */
	pthread_rwlock_t lock;		/* protects entries and removed_maps */
	struct list_head removed_maps;	/* evicted but still referenced maps */
};
struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
struct maps maps[MAP__NR_TYPES];
struct machine *machine;
atomic_t refcnt;
};
......@@ -144,6 +151,16 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
void map__delete(struct map *map);
struct map *map__clone(struct map *map);
/*
 * Grab a reference on @map (NULL is tolerated) and return it, so it
 * can be used in assignments: new = map__get(old);
 */
static inline struct map *map__get(struct map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}
void map__put(struct map *map);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
......@@ -162,10 +179,10 @@ void map__reloc_vmlinux(struct map *map);
size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
void maps__remove(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
struct map *maps__first(struct rb_root *maps);
void maps__insert(struct maps *maps, struct map *map);
void maps__remove(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr);
struct map *maps__first(struct maps *maps);
struct map *map__next(struct map *map);
void map_groups__init(struct map_groups *mg, struct machine *machine);
void map_groups__exit(struct map_groups *mg);
......
#include "perf.h"
#include "util/util.h"
#include "util/debug.h"
#include "util/parse-options.h"
#include "util/parse-branch-options.h"
/* Helpers to build entries of the branch_modes[] table below. */
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

/* Maps one -b/--branch-filter token to its PERF_SAMPLE_BRANCH_* flag. */
struct branch_mode {
	const char *name;
	int mode;
};

/* All recognized branch filter names, terminated by BRANCH_END. */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};
/*
 * parse_options() callback for -b/--branch-filter: parse a comma
 * separated list of branch filter names (see branch_modes[]) into the
 * PERF_SAMPLE_BRANCH_* mask pointed to by opt->value.
 *
 * Returns 0 on success; -1 on an unknown filter name, strdup() failure,
 * or when the mask was already set (e.g. -b combined with
 * --branch-filter).
 */
int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
/* Mask of the pure privilege-level filters (user/kernel/hv). */
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER |\
	 PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		/* walk the comma separated tokens in place */
		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			/* look the token up in the filter name table */
			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}
#ifndef _PERF_PARSE_BRANCH_OPTIONS_H
#define _PERF_PARSE_BRANCH_OPTIONS_H 1

struct option;

/* parse_options() callback for the -b/--branch-filter option. */
int parse_branch_stack(const struct option *opt, const char *str, int unset);
#endif /* _PERF_PARSE_BRANCH_OPTIONS_H */
......@@ -163,7 +163,7 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
static struct map *kernel_get_module_map(const char *module)
{
struct map_groups *grp = &host_machine->kmaps;
struct rb_root *maps = &grp->maps[MAP__FUNCTION];
struct maps *maps = &grp->maps[MAP__FUNCTION];
struct map *pos;
/* A file path -- this is an offline module */
......@@ -195,7 +195,7 @@ static void put_target_map(struct map *map, bool user)
{
if (map && user) {
/* Only the user map needs to be released */
map__delete(map);
map__put(map);
}
}
......@@ -1791,7 +1791,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
out:
if (map && !is_kprobe) {
map__delete(map);
map__put(map);
}
return ret;
......@@ -2884,7 +2884,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
end:
if (user) {
map__delete(map);
map__put(map);
}
exit_symbol_maps();
......
......@@ -972,8 +972,10 @@ int dso__load_sym(struct dso *dso, struct map *map,
map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */
if (kmaps) {
map__get(map);
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
map__put(map);
}
}
......
......@@ -202,12 +202,14 @@ void symbols__fixup_end(struct rb_root *symbols)
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
struct rb_root *maps = &mg->maps[type];
struct maps *maps = &mg->maps[type];
struct map *next, *curr;
pthread_rwlock_wrlock(&maps->lock);
curr = maps__first(maps);
if (curr == NULL)
return;
goto out_unlock;
for (next = map__next(curr); next; next = map__next(curr)) {
curr->end = next->start;
......@@ -219,6 +221,9 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
* last map final address.
*/
curr->end = ~0ULL;
out_unlock:
pthread_rwlock_unlock(&maps->lock);
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
......@@ -654,14 +659,14 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
curr_map = map_groups__find(kmaps, map->type, pos->start);
if (!curr_map || (filter && filter(curr_map, pos))) {
rb_erase(&pos->rb_node, root);
rb_erase_init(&pos->rb_node, root);
symbol__delete(pos);
} else {
pos->start -= curr_map->start - curr_map->pgoff;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
if (curr_map != map) {
rb_erase(&pos->rb_node, root);
rb_erase_init(&pos->rb_node, root);
symbols__insert(
&curr_map->dso->symbols[curr_map->type],
pos);
......@@ -1168,20 +1173,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
/* Add new maps */
while (!list_empty(&md.maps)) {
new_map = list_entry(md.maps.next, struct map, node);
list_del(&new_map->node);
list_del_init(&new_map->node);
if (new_map == replacement_map) {
map->start = new_map->start;
map->end = new_map->end;
map->pgoff = new_map->pgoff;
map->map_ip = new_map->map_ip;
map->unmap_ip = new_map->unmap_ip;
map__delete(new_map);
/* Ensure maps are correctly ordered */
map__get(map);
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
map__put(map);
} else {
map_groups__insert(kmaps, new_map);
}
map__put(new_map);
}
/*
......@@ -1206,8 +1214,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
out_err:
while (!list_empty(&md.maps)) {
map = list_entry(md.maps.next, struct map, node);
list_del(&map->node);
map__delete(map);
list_del_init(&map->node);
map__put(map);
}
close(fd);
return -EINVAL;
......@@ -1520,15 +1528,21 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
struct map *map_groups__find_by_name(struct map_groups *mg,
enum map_type type, const char *name)
{
struct rb_root *maps = &mg->maps[type];
struct maps *maps = &mg->maps[type];
struct map *map;
pthread_rwlock_rdlock(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
if (map->dso && strcmp(map->dso->short_name, name) == 0)
return map;
goto out_unlock;
}
return NULL;
map = NULL;
out_unlock:
pthread_rwlock_unlock(&maps->lock);
return map;
}
int dso__load_vmlinux(struct dso *dso, struct map *map,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment