Commit 79928928 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo-2' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - List perf probes to stdout. (Masami Hiramatsu)

  - Return error when none of the requested probes were
    installed. (Masami Hiramatsu)

  - Cut off the gcc optimization suffixes from
    function names in 'perf probe'. (Masami Hiramatsu)

  - Allow disabling/enabling events dynamically in 'perf top':
    a 'perf top' session can instantly become a 'perf report'
    one, i.e. go from dynamic analysis to a static one and back
    again. To toggle between the two modes, just press CTRL+z
    (a condensed sketch follows this list). (Arnaldo Carvalho de Melo)

  - Greatly speed up 'perf probe --list' by caching debuginfo.
    (Masami Hiramatsu)

  - Fix 'perf trace' race condition at the end of started
    workloads. (Sukadev Bhattiprolu)

  - Fix a problem when opening old perf.data with different
    byte order. (Wang Nan)
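
  A condensed sketch of how the CTRL+z toggle is wired up, pieced together
  from the perf_evlist and 'perf top' hunks further down (simplified; error
  handling and locking omitted):

      void perf_evlist__toggle_enable(struct perf_evlist *evlist)
      {
              (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
      }

      /* TUI display thread: keep browsing until a key other than CTRL+z */
      while (true) {
              int key = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
                                                      top->min_percent,
                                                      &top->session->header.env);
              if (key != CTRL('z'))
                      break;

              perf_evlist__toggle_enable(top->evlist);
              /* no refresh/decay of histograms while events are disabled */
              hbt.refresh = top->evlist->enabled ? top->delay_secs : 0;
      }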

Infrastructure changes:

  - Replace map->referenced & maps->removed_maps with
    map->refcnt (see the refcounting sketch after this list).
    (Arnaldo Carvalho de Melo)

  - Introduce the xyarray__reset() function. (Jiri Olsa)

  - Add thread_map__(alloc|realloc)() helpers. (Jiri Olsa)

  - Move perf_evsel__(alloc|free|reset)_counts into stat object. (Jiri Olsa)

  - Introduce perf_counts__(new|delete|reset)() functions. (Jiri Olsa)

  - Ignore .config-detected in .gitignore. (Wang Nan)

  - Move the libtraceevent dynamic list to a separate LDFLAGS
    variable. (Wang Nan)
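
  The refcounting discipline that replaces map->referenced, condensed from
  the map__get()/map__zput() hunks below (illustrative only; the real call
  sites are in the diff):

      /* take a reference whenever a hist_entry stores a map pointer */
      he->ms.map = map__get(entry->ms.map);

      /* drop the reference and clear the pointer when the entry is deleted */
      static inline void __map__zput(struct map **map)
      {
              map__put(*map);
              *map = NULL;
      }
      #define map__zput(map) __map__zput(&map)

      map__zput(he->ms.map);
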
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 61d67d56 5d484f99
@@ -28,3 +28,4 @@ config.mak.autogen
 *-flex.*
 *.pyc
 *.pyo
+.config-detected
@@ -174,7 +174,7 @@ LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
 export LIBTRACEEVENT

 LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)libtraceevent-dynamic-list
-LDFLAGS += -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYNAMIC_LIST)
+LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS = -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYNAMIC_LIST)

 LIBAPI = $(LIB_PATH)libapi.a
 export LIBAPI
@@ -190,8 +190,9 @@ python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT
 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)

-$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
-	$(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
+$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
+	$(QUIET_GEN)CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
+	  $(PYTHON_WORD) util/setup.py \
 	  --quiet build_ext; \
 	mkdir -p $(OUTPUT)python && \
 	cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
@@ -282,7 +283,8 @@ $(PERF_IN): $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h FORCE
 	$(Q)$(MAKE) $(build)=perf

 $(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
-	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(PERF_IN) $(LIBS) -o $@
+	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
+		$(PERF_IN) $(LIBS) -o $@

 $(GTK_IN): FORCE
 	$(Q)$(MAKE) $(build)=gtk
......
@@ -178,24 +178,19 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)

 static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
 {
-	void *addr;
-	size_t sz;
+	struct perf_counts *counts;

-	sz = sizeof(*evsel->counts) +
-	     (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));
-
-	addr = zalloc(sz);
-	if (!addr)
-		return -ENOMEM;
-
-	evsel->prev_raw_counts = addr;
+	counts = perf_counts__new(perf_evsel__nr_cpus(evsel));
+	if (counts)
+		evsel->prev_raw_counts = counts;

-	return 0;
+	return counts ? 0 : -ENOMEM;
 }

 static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 {
-	zfree(&evsel->prev_raw_counts);
+	perf_counts__delete(evsel->prev_raw_counts);
+	evsel->prev_raw_counts = NULL;
 }

 static void perf_evlist__free_stats(struct perf_evlist *evlist)
......
@@ -235,10 +235,13 @@ static void perf_top__show_details(struct perf_top *top)

 	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
 				       0, top->sym_pcnt_filter, top->print_entries, 4);
-	if (top->zero)
-		symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
-	else
-		symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
+
+	if (top->evlist->enabled) {
+		if (top->zero)
+			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
+		else
+			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
+	}
 	if (more != 0)
 		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
 out_unlock:
@@ -276,11 +279,13 @@ static void perf_top__print_sym_table(struct perf_top *top)
 		return;
 	}

-	if (top->zero) {
-		hists__delete_entries(hists);
-	} else {
-		hists__decay_entries(hists, top->hide_user_symbols,
-				     top->hide_kernel_symbols);
+	if (top->evlist->enabled) {
+		if (top->zero) {
+			hists__delete_entries(hists);
+		} else {
+			hists__decay_entries(hists, top->hide_user_symbols,
+					     top->hide_kernel_symbols);
+		}
 	}

 	hists__collapse_resort(hists, NULL);
@@ -545,11 +550,13 @@ static void perf_top__sort_new_samples(void *arg)

 	hists = evsel__hists(t->sym_evsel);

-	if (t->zero) {
-		hists__delete_entries(hists);
-	} else {
-		hists__decay_entries(hists, t->hide_user_symbols,
-				     t->hide_kernel_symbols);
+	if (t->evlist->enabled) {
+		if (t->zero) {
+			hists__delete_entries(hists);
+		} else {
+			hists__decay_entries(hists, t->hide_user_symbols,
+					     t->hide_kernel_symbols);
+		}
 	}

 	hists__collapse_resort(hists, NULL);
@@ -579,8 +586,21 @@ static void *display_thread_tui(void *arg)
 		hists->uid_filter_str = top->record_opts.target.uid_str;
 	}

-	perf_evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
-				      &top->session->header.env);
+	while (true) {
+		int key = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
+							top->min_percent,
+							&top->session->header.env);
+
+		if (key != CTRL('z'))
+			break;
+
+		perf_evlist__toggle_enable(top->evlist);
+		/*
+		 * No need to refresh, resort/decay histogram entries
+		 * if we are not collecting samples:
+		 */
+		hbt.refresh = top->evlist->enabled ? top->delay_secs : 0;
+	}

 	done = 1;
 	return NULL;
......
@@ -3,6 +3,7 @@
 #include "thread_map.h"
 #include "cpumap.h"
 #include "debug.h"
+#include "stat.h"

 int test__openat_syscall_event_on_all_cpus(void)
 {
......
@@ -1736,6 +1736,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 	"t             Zoom into current Thread\n"
 	"V             Verbose (DSO names in callchains, etc)\n"
 	"z             Toggle zeroing of samples\n"
+	"CTRL+z        Enable/Disable events\n"
 	"/             Filter symbol by name";

 	if (browser == NULL)
@@ -1900,6 +1901,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 			/* Fall thru */
 		case 'q':
 		case CTRL('c'):
+		case CTRL('z'):
 			goto out_free_stack;
 		default:
 			continue;
......
@@ -297,6 +297,8 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 					      PERF_EVENT_IOC_DISABLE, 0);
 		}
 	}
+
+	evlist->enabled = false;
 }

 void perf_evlist__enable(struct perf_evlist *evlist)
@@ -316,6 +318,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 					      PERF_EVENT_IOC_ENABLE, 0);
 		}
 	}
+
+	evlist->enabled = true;
+}
+
+void perf_evlist__toggle_enable(struct perf_evlist *evlist)
+{
+	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
 }

 int perf_evlist__disable_event(struct perf_evlist *evlist,
@@ -634,11 +643,18 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
 	struct perf_mmap *md = &evlist->mmap[idx];
-	u64 head = perf_mmap__read_head(md);
+	u64 head;
 	u64 old = md->prev;
 	unsigned char *data = md->base + page_size;
 	union perf_event *event = NULL;

+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!atomic_read(&md->refcnt))
+		return NULL;
+
+	head = perf_mmap__read_head(md);
 	if (evlist->overwrite) {
 		/*
 		 * If we're further behind than half the buffer, there's a chance
......
@@ -41,6 +41,7 @@ struct perf_evlist {
 	int		 nr_groups;
 	int		 nr_mmaps;
 	bool		 overwrite;
+	bool		 enabled;
 	size_t		 mmap_len;
 	int		 id_pos;
 	int		 is_pos;
@@ -139,6 +140,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist);

 void perf_evlist__disable(struct perf_evlist *evlist);
 void perf_evlist__enable(struct perf_evlist *evlist);
+void perf_evlist__toggle_enable(struct perf_evlist *evlist);

 int perf_evlist__disable_event(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel);
......
@@ -26,6 +26,7 @@
 #include "perf_regs.h"
 #include "debug.h"
 #include "trace-event.h"
+#include "stat.h"

 static struct {
 	bool sample_id_all;
@@ -851,19 +852,6 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
 	return 0;
 }

-void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
-{
-	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
-				  (ncpus * sizeof(struct perf_counts_values))));
-}
-
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
-{
-	evsel->counts = zalloc((sizeof(*evsel->counts) +
-				(ncpus * sizeof(struct perf_counts_values))));
-	return evsel->counts != NULL ? 0 : -ENOMEM;
-}
-
 static void perf_evsel__free_fd(struct perf_evsel *evsel)
 {
 	xyarray__delete(evsel->fd);
@@ -891,11 +879,6 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 	}
 }

-void perf_evsel__free_counts(struct perf_evsel *evsel)
-{
-	zfree(&evsel->counts);
-}
-
 void perf_evsel__exit(struct perf_evsel *evsel)
 {
 	assert(list_empty(&evsel->node));
......
@@ -170,9 +170,6 @@ const char *perf_evsel__group_name(struct perf_evsel *evsel);
 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
-void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
-void perf_evsel__free_counts(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
......
@@ -313,8 +313,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 			memset(&he->stat, 0, sizeof(he->stat));
 		}

-		if (he->ms.map)
-			he->ms.map->referenced = true;
+		map__get(he->ms.map);

 		if (he->branch_info) {
 			/*
@@ -324,6 +323,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 			 */
 			he->branch_info = malloc(sizeof(*he->branch_info));
 			if (he->branch_info == NULL) {
+				map__zput(he->ms.map);
 				free(he->stat_acc);
 				free(he);
 				return NULL;
@@ -332,17 +332,13 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 			memcpy(he->branch_info, template->branch_info,
 			       sizeof(*he->branch_info));

-			if (he->branch_info->from.map)
-				he->branch_info->from.map->referenced = true;
-			if (he->branch_info->to.map)
-				he->branch_info->to.map->referenced = true;
+			map__get(he->branch_info->from.map);
+			map__get(he->branch_info->to.map);
 		}

 		if (he->mem_info) {
-			if (he->mem_info->iaddr.map)
-				he->mem_info->iaddr.map->referenced = true;
-			if (he->mem_info->daddr.map)
-				he->mem_info->daddr.map->referenced = true;
+			map__get(he->mem_info->iaddr.map);
+			map__get(he->mem_info->daddr.map);
 		}

 		if (symbol_conf.use_callchain)
@@ -407,9 +403,8 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
 			 * the history counter to increment.
 			 */
 			if (he->ms.map != entry->ms.map) {
-				he->ms.map = entry->ms.map;
-				if (he->ms.map)
-					he->ms.map->referenced = true;
+				map__put(he->ms.map);
+				he->ms.map = map__get(entry->ms.map);
 			}
 			goto out;
 		}
@@ -933,8 +928,20 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
 void hist_entry__delete(struct hist_entry *he)
 {
 	thread__zput(he->thread);
-	zfree(&he->branch_info);
-	zfree(&he->mem_info);
+	map__zput(he->ms.map);
+
+	if (he->branch_info) {
+		map__zput(he->branch_info->from.map);
+		map__zput(he->branch_info->to.map);
+		zfree(&he->branch_info);
+	}
+
+	if (he->mem_info) {
+		map__zput(he->mem_info->iaddr.map);
+		map__zput(he->mem_info->daddr.map);
+		zfree(&he->mem_info);
+	}
+
 	zfree(&he->stat_acc);
 	free_srcline(he->srcline);
 	free_callchain(he->callchain);
......
@@ -137,7 +137,6 @@ void map__init(struct map *map, enum map_type type,
 	map->unmap_ip = map__unmap_ip;
 	RB_CLEAR_NODE(&map->rb_node);
 	map->groups = NULL;
-	map->referenced = false;
 	map->erange_warned = false;
 	atomic_set(&map->refcnt, 1);
 }
@@ -439,7 +438,6 @@ static void maps__init(struct maps *maps)
 {
 	maps->entries = RB_ROOT;
 	pthread_rwlock_init(&maps->lock, NULL);
-	INIT_LIST_HEAD(&maps->removed_maps);
 }

 void map_groups__init(struct map_groups *mg, struct machine *machine)
@@ -466,21 +464,10 @@ static void __maps__purge(struct maps *maps)
 	}
 }

-static void __maps__purge_removed_maps(struct maps *maps)
-{
-	struct map *pos, *n;
-
-	list_for_each_entry_safe(pos, n, &maps->removed_maps, node) {
-		list_del_init(&pos->node);
-		map__put(pos);
-	}
-}
-
 static void maps__exit(struct maps *maps)
 {
 	pthread_rwlock_wrlock(&maps->lock);
 	__maps__purge(maps);
-	__maps__purge_removed_maps(maps);
 	pthread_rwlock_unlock(&maps->lock);
 }

@@ -499,8 +486,6 @@ bool map_groups__empty(struct map_groups *mg)
 	for (i = 0; i < MAP__NR_TYPES; ++i) {
 		if (maps__first(&mg->maps[i]))
 			return false;
-		if (!list_empty(&mg->maps[i].removed_maps))
-			return false;
 	}

 	return true;
@@ -621,7 +606,7 @@ size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
 	return printed += maps__fprintf(&mg->maps[type], fp);
 }

-static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 {
 	size_t printed = 0, i;
 	for (i = 0; i < MAP__NR_TYPES; ++i)
@@ -629,39 +614,6 @@ static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
 	return printed;
 }

-static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
-						 enum map_type type, FILE *fp)
-{
-	struct map *pos;
-	size_t printed = 0;
-
-	list_for_each_entry(pos, &mg->maps[type].removed_maps, node) {
-		printed += fprintf(fp, "Map:");
-		printed += map__fprintf(pos, fp);
-		if (verbose > 1) {
-			printed += dso__fprintf(pos->dso, type, fp);
-			printed += fprintf(fp, "--\n");
-		}
-	}
-	return printed;
-}
-
-static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
-					       FILE *fp)
-{
-	size_t printed = 0, i;
-	for (i = 0; i < MAP__NR_TYPES; ++i)
-		printed += __map_groups__fprintf_removed_maps(mg, i, fp);
-	return printed;
-}
-
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
-{
-	size_t printed = map_groups__fprintf_maps(mg, fp);
-	printed += fprintf(fp, "Removed maps:\n");
-	return printed + map_groups__fprintf_removed_maps(mg, fp);
-}
-
 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
 {
 	struct rb_root *root;
@@ -719,13 +671,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
 			map__fprintf(after, fp);
 		}
 put_map:
-		/*
-		 * If we have references, just move them to a separate list.
-		 */
-		if (pos->referenced)
-			list_add_tail(&pos->node, &maps->removed_maps);
-		else
-			map__put(pos);
+		map__put(pos);

 		if (err)
 			goto out;
......
@@ -34,7 +34,6 @@ struct map {
 	u64			start;
 	u64			end;
 	u8 /* enum map_type */	type;
-	bool			referenced;
 	bool			erange_warned;
 	u32			priv;
 	u32			prot;
@@ -63,7 +62,6 @@ struct kmap {
 struct maps {
 	struct rb_root	 entries;
 	pthread_rwlock_t lock;
-	struct list_head removed_maps;
 };

 struct map_groups {
@@ -161,6 +159,14 @@ static inline struct map *map__get(struct map *map)

 void map__put(struct map *map);

+static inline void __map__zput(struct map **map)
+{
+	map__put(*map);
+	*map = NULL;
+}
+
+#define map__zput(map) __map__zput(&map)
+
 int map__overlap(struct map *l, struct map *r);
 size_t map__fprintf(struct map *map, FILE *fp);
 size_t map__fprintf_dsoname(struct map *map, FILE *fp);
......
This diff is collapsed.
@@ -16,6 +16,7 @@ util/util.c
 util/xyarray.c
 util/cgroup.c
 util/rblist.c
+util/stat.c
 util/strlist.c
 util/trace-event.c
 ../../lib/rbtree.c
@@ -517,20 +517,42 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
 {
 	attr->type		= bswap_32(attr->type);
 	attr->size		= bswap_32(attr->size);
-	attr->config		= bswap_64(attr->config);
-	attr->sample_period	= bswap_64(attr->sample_period);
-	attr->sample_type	= bswap_64(attr->sample_type);
-	attr->read_format	= bswap_64(attr->read_format);
-	attr->wakeup_events	= bswap_32(attr->wakeup_events);
-	attr->bp_type		= bswap_32(attr->bp_type);
-	attr->bp_addr		= bswap_64(attr->bp_addr);
-	attr->bp_len		= bswap_64(attr->bp_len);
-	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
-	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
-	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);
-	attr->aux_watermark	 = bswap_32(attr->aux_watermark);

-	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
+#define bswap_safe(f, n) 					\
+	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
+		       sizeof(attr->f) * (n)))
+#define bswap_field(f, sz) 			\
+do { 						\
+	if (bswap_safe(f, 0))			\
+		attr->f = bswap_##sz(attr->f);	\
+} while(0)
+#define bswap_field_32(f) bswap_field(f, 32)
+#define bswap_field_64(f) bswap_field(f, 64)
+
+	bswap_field_64(config);
+	bswap_field_64(sample_period);
+	bswap_field_64(sample_type);
+	bswap_field_64(read_format);
+	bswap_field_32(wakeup_events);
+	bswap_field_32(bp_type);
+	bswap_field_64(bp_addr);
+	bswap_field_64(bp_len);
+	bswap_field_64(branch_sample_type);
+	bswap_field_64(sample_regs_user);
+	bswap_field_32(sample_stack_user);
+	bswap_field_32(aux_watermark);
+
+	/*
+	 * After read_format are bitfields. Check read_format because
+	 * we are unable to use offsetof on bitfield.
+	 */
+	if (bswap_safe(read_format, 1))
+		swap_bitfield((u8 *) (&attr->read_format + 1),
+			      sizeof(u64));
+#undef bswap_field_64
+#undef bswap_field_32
+#undef bswap_field
+#undef bswap_safe
 }

 static void perf_event__hdr_attr_swap(union perf_event *event,
......
@@ -94,3 +94,39 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
 		}
 	}
 }
+
+struct perf_counts *perf_counts__new(int ncpus)
+{
+	int size = sizeof(struct perf_counts) +
+		   ncpus * sizeof(struct perf_counts_values);
+
+	return zalloc(size);
+}
+
+void perf_counts__delete(struct perf_counts *counts)
+{
+	free(counts);
+}
+
+static void perf_counts__reset(struct perf_counts *counts, int ncpus)
+{
+	memset(counts, 0, (sizeof(*counts) +
+			   (ncpus * sizeof(struct perf_counts_values))));
+}
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
+{
+	perf_counts__reset(evsel->counts, ncpus);
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+{
+	evsel->counts = perf_counts__new(ncpus);
+	return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_counts(struct perf_evsel *evsel)
+{
+	perf_counts__delete(evsel->counts);
+	evsel->counts = NULL;
+}
@@ -62,4 +62,10 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
 				   double avg, int cpu, enum aggr_mode aggr);

+struct perf_counts *perf_counts__new(int ncpus);
+void perf_counts__delete(struct perf_counts *counts);
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__free_counts(struct perf_evsel *evsel);
 #endif
@@ -20,6 +20,15 @@ static int filter(const struct dirent *dir)
 	return 1;
 }

+static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
+{
+	size_t size = sizeof(*map) + sizeof(pid_t) * nr;
+
+	return realloc(map, size);
+}
+
+#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)
+
 struct thread_map *thread_map__new_by_pid(pid_t pid)
 {
 	struct thread_map *threads;
@@ -33,7 +42,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)
 	if (items <= 0)
 		return NULL;

-	threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+	threads = thread_map__alloc(items);
 	if (threads != NULL) {
 		for (i = 0; i < items; i++)
 			threads->map[i] = atoi(namelist[i]->d_name);
@@ -49,7 +58,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)

 struct thread_map *thread_map__new_by_tid(pid_t tid)
 {
-	struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+	struct thread_map *threads = thread_map__alloc(1);

 	if (threads != NULL) {
 		threads->map[0] = tid;
@@ -65,8 +74,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
 	int max_threads = 32, items, i;
 	char path[256];
 	struct dirent dirent, *next, **namelist = NULL;
-	struct thread_map *threads = malloc(sizeof(*threads) +
-					    max_threads * sizeof(pid_t));
+	struct thread_map *threads = thread_map__alloc(max_threads);
+
 	if (threads == NULL)
 		goto out;
@@ -185,8 +194,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
 			goto out_free_threads;

 		total_tasks += items;
-		nt = realloc(threads, (sizeof(*threads) +
-				       sizeof(pid_t) * total_tasks));
+		nt = thread_map__realloc(threads, total_tasks);
 		if (nt == NULL)
 			goto out_free_namelist;
@@ -216,7 +224,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)

 struct thread_map *thread_map__new_dummy(void)
 {
-	struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+	struct thread_map *threads = thread_map__alloc(1);

 	if (threads != NULL) {
 		threads->map[0] = -1;
@@ -253,7 +261,7 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
 			continue;

 		ntasks++;
-		nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks);
+		nt = thread_map__realloc(threads, ntasks);

 		if (nt == NULL)
 			goto out_free_threads;
......
@@ -360,7 +360,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
 		unw_word_t base = is_exec ? 0 : map->start;

 		if (fd >= 0)
-			dso__data_put_fd(dso);
+			dso__data_put_fd(map->dso);

 		memset(&di, 0, sizeof(di));
 		if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
......
@@ -9,11 +9,19 @@ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
 	if (xy != NULL) {
 		xy->entry_size = entry_size;
 		xy->row_size   = row_size;
+		xy->entries    = xlen * ylen;
 	}

 	return xy;
 }

+void xyarray__reset(struct xyarray *xy)
+{
+	size_t n = xy->entries * xy->entry_size;
+
+	memset(xy->contents, 0, n);
+}
+
 void xyarray__delete(struct xyarray *xy)
 {
 	free(xy);
......
@@ -6,11 +6,13 @@
 struct xyarray {
 	size_t row_size;
 	size_t entry_size;
+	size_t entries;
 	char contents[];
 };

 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
 void xyarray__delete(struct xyarray *xy);
+void xyarray__reset(struct xyarray *xy);

 static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
 {
......