Commit 152fefa9 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

. Fix some leaks in exit paths.

. Use memdup where applicable.

. Remove some die() calls, allowing callers to handle exit paths
  gracefully.

. Correct typo in tools Makefile, fix from Borislav Petkov.

. Add 'perf bench numa mem' NUMA performance measurement suite, from Ingo Molnar.

. Handle dynamic array's element size properly, fix from Jiri Olsa.

. Fix memory leaks on evsel->counts, from Namhyung Kim.

. Make the numa benchmark optional, allowing the build on machines where the
  required numa libraries are not present, fix from Peter Hurley.

. Add interval printing in 'perf stat', from Stephane Eranian.

. Fix compile warnings in tests/attr.c, from Sukadev Bhattiprolu.

. Fix a double free, an fclose that should have been a pclose, leaks, and
  double-fclose errors found with the cppcheck tool, from Thomas Jarosch.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parents a2d28d0c 5809fde0
......@@ -15,7 +15,7 @@ help:
@echo ' x86_energy_perf_policy - Intel energy policy tool'
@echo ''
@echo 'You can do:'
@echo ' $$ make -C tools/<tool>_install'
@echo ' $$ make -C tools/ <tool>_install'
@echo ''
@echo ' from the kernel command line to build and install one of'
@echo ' the tools above'
......
......@@ -1223,6 +1223,34 @@ static int field_is_long(struct format_field *field)
return 0;
}
static unsigned int type_size(const char *name)
{
/* This covers all FIELD_IS_STRING types. */
static struct {
const char *type;
unsigned int size;
} table[] = {
{ "u8", 1 },
{ "u16", 2 },
{ "u32", 4 },
{ "u64", 8 },
{ "s8", 1 },
{ "s16", 2 },
{ "s32", 4 },
{ "s64", 8 },
{ "char", 1 },
{ },
};
int i;
for (i = 0; table[i].type; i++) {
if (!strcmp(table[i].type, name))
return table[i].size;
}
return 0;
}
static int event_read_fields(struct event_format *event, struct format_field **fields)
{
struct format_field *field = NULL;
......@@ -1232,6 +1260,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f
int count = 0;
do {
unsigned int size_dynamic = 0;
type = read_token(&token);
if (type == EVENT_NEWLINE) {
free_token(token);
......@@ -1390,6 +1420,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
field->type = new_type;
strcat(field->type, " ");
strcat(field->type, field->name);
size_dynamic = type_size(field->name);
free_token(field->name);
strcat(field->type, brackets);
field->name = token;
......@@ -1478,10 +1509,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f
if (field->flags & FIELD_IS_ARRAY) {
if (field->arraylen)
field->elementsize = field->size / field->arraylen;
else if (field->flags & FIELD_IS_DYNAMIC)
field->elementsize = size_dynamic;
else if (field->flags & FIELD_IS_STRING)
field->elementsize = 1;
else
field->elementsize = event->pevent->long_size;
else if (field->flags & FIELD_IS_LONG)
field->elementsize = event->pevent ?
event->pevent->long_size :
sizeof(long);
} else
field->elementsize = field->size;
......
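The element-size fix above reduces to a plain string-to-size lookup plus one extra branch in event_read_fields(). A minimal standalone sketch, with the table copied from the hunk above and a main() added only for illustration:

	#include <stdio.h>
	#include <string.h>

	/* same string-to-size table as type_size() in the hunk above */
	static unsigned int type_size(const char *name)
	{
		static struct {
			const char *type;
			unsigned int size;
		} table[] = {
			{ "u8", 1 }, { "u16", 2 }, { "u32", 4 }, { "u64", 8 },
			{ "s8", 1 }, { "s16", 2 }, { "s32", 4 }, { "s64", 8 },
			{ "char", 1 },
			{ },
		};
		int i;

		for (i = 0; table[i].type; i++) {
			if (!strcmp(table[i].type, name))
				return table[i].size;
		}
		return 0; /* unknown type name: no element size known */
	}

	int main(void)
	{
		/* "char" covers the FIELD_IS_STRING cases; unknown names yield 0 */
		printf("u64 -> %u, char -> %u, foo -> %u\n",
		       type_size("u64"), type_size("char"), type_size("foo"));
		return 0;
	}

Compiled on its own this prints "u64 -> 8, char -> 1, foo -> 0": dynamic-array fields with a recognized base type now get a real element size, while unrecognized types fall back to 0.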
......@@ -114,6 +114,10 @@ with it. --append may be used here. Examples:
perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage
-I msecs::
--interval-print msecs::
print count deltas every N milliseconds (minimum: 100ms)
example: perf stat -I 1000 -e cycles -a sleep 5
EXAMPLES
--------
......
......@@ -47,6 +47,8 @@ include config/utilities.mak
# backtrace post unwind.
#
# Define NO_BACKTRACE if you do not want stack backtrace debug feature
#
# Define NO_LIBNUMA if you do not want numa perf benchmark
$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
......@@ -838,6 +840,17 @@ ifndef NO_BACKTRACE
endif
endif
ifndef NO_LIBNUMA
FLAGS_LIBNUMA = $(ALL_CFLAGS) $(ALL_LDFLAGS) -lnuma
ifneq ($(call try-cc,$(SOURCE_LIBNUMA),$(FLAGS_LIBNUMA),libnuma),y)
msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev);
else
BASIC_CFLAGS += -DLIBNUMA_SUPPORT
BUILTIN_OBJS += $(OUTPUT)bench/numa.o
EXTLIBS += -lnuma
endif
endif
ifdef ASCIIDOC8
export ASCIIDOC8
endif
......
......@@ -155,6 +155,7 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
if (lookup_path(buf))
goto out;
free(buf);
buf = NULL;
}
if (!strcmp(arch, "arm"))
......
#ifndef BENCH_H
#define BENCH_H
extern int bench_numa(int argc, const char **argv, const char *prefix);
extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
extern int bench_mem_memcpy(int argc, const char **argv,
......
This diff is collapsed.
......@@ -35,6 +35,18 @@ struct bench_suite {
/* sentinel: easy for help */
#define suite_all { "all", "Test all benchmark suites", NULL }
#ifdef LIBNUMA_SUPPORT
static struct bench_suite numa_suites[] = {
{ "mem",
"Benchmark for NUMA workloads",
bench_numa },
suite_all,
{ NULL,
NULL,
NULL }
};
#endif
static struct bench_suite sched_suites[] = {
{ "messaging",
"Benchmark for scheduler and IPC mechanisms",
......@@ -68,6 +80,11 @@ struct bench_subsys {
};
static struct bench_subsys subsystems[] = {
#ifdef LIBNUMA_SUPPORT
{ "numa",
"NUMA scheduling and MM behavior",
numa_suites },
#endif
{ "sched",
"scheduler and IPC mechanism",
sched_suites },
......
......@@ -17,6 +17,7 @@
#include "util/debug.h"
#include <linux/rbtree.h>
#include <linux/string.h>
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
......@@ -618,12 +619,11 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
for (i = 0; i < NUM_AVAIL_SORTS; i++) {
if (!strcmp(avail_sorts[i]->name, tok)) {
sort = malloc(sizeof(*sort));
sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
if (!sort) {
pr_err("%s: malloc failed\n", __func__);
pr_err("%s: memdup failed\n", __func__);
return -1;
}
memcpy(sort, avail_sorts[i], sizeof(*sort));
list_add_tail(&sort->list, list);
return 0;
}
......
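For context, the memdup() switched to above (from the tools' <linux/string.h>) appears to be just the malloc()+memcpy() pair it replaces. A minimal standalone sketch of that behaviour, with memdup_sketch as a purely illustrative name:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* duplicate len bytes of src into a freshly allocated buffer */
	static void *memdup_sketch(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}

	int main(void)
	{
		const char src[] = "avail_sort entry";
		char *copy = memdup_sketch(src, sizeof(src));

		if (!copy)
			return 1;
		puts(copy);
		free(copy);
		return 0;
	}

Folding the allocate-and-copy into one call is what lets the hunk above drop the separate memcpy() and report a single failure point.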
......@@ -65,6 +65,10 @@
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
static void print_stat(int argc, const char **argv);
static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
static void print_counter(struct perf_evsel *counter, char *prefix);
static struct perf_evlist *evsel_list;
static struct perf_target target = {
......@@ -87,6 +91,8 @@ static FILE *output = NULL;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int interval = 0;
static struct timespec ref_time;
static volatile int done = 0;
......@@ -94,6 +100,28 @@ struct perf_stat {
struct stats res_stats[3];
};
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
r->tv_sec = a->tv_sec - b->tv_sec;
if (a->tv_nsec < b->tv_nsec) {
r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
r->tv_sec--;
} else {
r->tv_nsec = a->tv_nsec - b->tv_nsec ;
}
}
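/*
 * Worked example (illustrative values): a = 5.2s, b = 3.7s, i.e.
 * a.tv_nsec (200000000) < b.tv_nsec (700000000), so one second is
 * borrowed: r->tv_nsec = 200000000 + 1000000000 - 700000000 = 500000000
 * and r->tv_sec = (5 - 3) - 1 = 1, giving r = 1.5s.
 */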
static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}
static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
return perf_evsel__cpus(evsel)->nr;
}
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
evsel->priv = zalloc(sizeof(struct perf_stat));
......@@ -106,14 +134,27 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
evsel->priv = NULL;
}
static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
{
return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
void *addr;
size_t sz;
sz = sizeof(*evsel->counts) +
(perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));
addr = zalloc(sz);
if (!addr)
return -ENOMEM;
evsel->prev_raw_counts = addr;
return 0;
}
static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
return perf_evsel__cpus(evsel)->nr;
free(evsel->prev_raw_counts);
evsel->prev_raw_counts = NULL;
}
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
......@@ -245,16 +286,69 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
static void print_interval(void)
{
static int num_print_interval;
struct perf_evsel *counter;
struct perf_stat *ps;
struct timespec ts, rs;
char prefix[64];
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node) {
ps = counter->priv;
memset(ps->res_stats, 0, sizeof(ps->res_stats));
read_counter(counter);
}
} else {
list_for_each_entry(counter, &evsel_list->entries, node) {
ps = counter->priv;
memset(ps->res_stats, 0, sizeof(ps->res_stats));
read_counter_aggr(counter);
}
}
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);
if (num_print_interval == 0 && !csv_output) {
if (no_aggr)
fprintf(output, "# time CPU counts events\n");
else
fprintf(output, "# time counts events\n");
}
if (++num_print_interval == 25)
num_print_interval = 0;
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter(counter, prefix);
} else {
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter_aggr(counter, prefix);
}
}
static int __run_perf_stat(int argc __maybe_unused, const char **argv)
{
char msg[512];
unsigned long long t0, t1;
struct perf_evsel *counter;
struct timespec ts;
int status = 0;
int child_ready_pipe[2], go_pipe[2];
const bool forks = (argc > 0);
char buf;
if (interval) {
ts.tv_sec = interval / 1000;
ts.tv_nsec = (interval % 1000) * 1000000;
} else {
ts.tv_sec = 1;
ts.tv_nsec = 0;
}
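/*
 * Example (illustrative value): -I 2500 gives
 * ts = { .tv_sec = 2, .tv_nsec = 500000000 }, so the wait loops
 * below wake every 2.5s to print the interval counts.
 */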
if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
perror("failed to create pipes");
return -1;
......@@ -347,14 +441,25 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
* Enable counters and exec the command:
*/
t0 = rdclock();
clock_gettime(CLOCK_MONOTONIC, &ref_time);
if (forks) {
close(go_pipe[1]);
if (interval) {
while (!waitpid(child_pid, &status, WNOHANG)) {
nanosleep(&ts, NULL);
print_interval();
}
}
wait(&status);
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
while(!done) sleep(1);
while (!done) {
nanosleep(&ts, NULL);
if (interval)
print_interval();
}
}
t1 = rdclock();
......@@ -440,7 +545,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
if (csv_output)
if (csv_output || interval)
return;
if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
......@@ -654,12 +759,11 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
if (csv_output)
if (csv_output || interval)
return;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = avg_stats(&runtime_cycles_stats[cpu]);
if (total)
ratio = avg / total;
......@@ -753,12 +857,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
* Print out the results of a single counter:
* aggregated counts in system-wide mode
*/
static void print_counter_aggr(struct perf_evsel *counter)
static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
{
struct perf_stat *ps = counter->priv;
double avg = avg_stats(&ps->res_stats[0]);
int scaled = counter->counts->scaled;
if (prefix)
fprintf(output, "%s", prefix);
if (scaled == -1) {
fprintf(output, "%*s%s%*s",
csv_output ? 0 : 18,
......@@ -801,7 +908,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
* Print out the results of a single counter:
* does not use aggregated count in system-wide
*/
static void print_counter(struct perf_evsel *counter)
static void print_counter(struct perf_evsel *counter, char *prefix)
{
u64 ena, run, val;
int cpu;
......@@ -810,6 +917,10 @@ static void print_counter(struct perf_evsel *counter)
val = counter->counts->cpu[cpu].val;
ena = counter->counts->cpu[cpu].ena;
run = counter->counts->cpu[cpu].run;
if (prefix)
fprintf(output, "%s", prefix);
if (run == 0 || ena == 0) {
fprintf(output, "CPU%*d%s%*s%s%*s",
csv_output ? 0 : -4,
......@@ -871,10 +982,10 @@ static void print_stat(int argc, const char **argv)
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter(counter);
print_counter(counter, NULL);
} else {
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter_aggr(counter);
print_counter_aggr(counter, NULL);
}
if (!csv_output) {
......@@ -895,7 +1006,7 @@ static volatile int signr = -1;
static void skip_signal(int signo)
{
if(child_pid == -1)
if ((child_pid == -1) || interval)
done = 1;
signr = signo;
......@@ -1115,6 +1226,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
"command to run prior to the measured command"),
OPT_STRING(0, "post", &post_cmd, "command",
"command to run after to the measured command"),
OPT_UINTEGER('I', "interval-print", &interval,
"print counts at regular interval in ms (>= 100)"),
OPT_END()
};
const char * const stat_usage[] = {
......@@ -1215,12 +1328,23 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(stat_usage, options);
return -1;
}
if (interval && interval < 100) {
pr_err("print interval must be >= 100ms\n");
usage_with_options(stat_usage, options);
return -1;
}
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
goto out_free_fd;
}
if (interval) {
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_prev_raw_counts(pos) < 0)
goto out_free_fd;
}
}
/*
* We dont want to block the signals - that would cause
......@@ -1230,6 +1354,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
*/
atexit(sig_atexit);
signal(SIGINT, skip_signal);
signal(SIGCHLD, skip_signal);
signal(SIGALRM, skip_signal);
signal(SIGABRT, skip_signal);
......@@ -1242,11 +1367,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
status = run_perf_stat(argc, argv);
}
if (status != -1)
if (status != -1 && !interval)
print_stat(argc, argv);
out_free_fd:
list_for_each_entry(pos, &evsel_list->entries, node)
list_for_each_entry(pos, &evsel_list->entries, node) {
perf_evsel__free_stat_priv(pos);
perf_evsel__free_counts(pos);
perf_evsel__free_prev_raw_counts(pos);
}
perf_evlist__delete_maps(evsel_list);
out:
perf_evlist__delete(evsel_list);
......
......@@ -225,3 +225,14 @@ int main(void)
return on_exit(NULL, NULL);
}
endef
define SOURCE_LIBNUMA
#include <numa.h>
#include <numaif.h>
int main(void)
{
numa_available();
return 0;
}
endef
\ No newline at end of file
......@@ -19,6 +19,11 @@
* permissions. All the event text files are stored there.
*/
/*
* Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
* 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
*/
#define __SANE_USERSPACE_TYPES__
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
......
......@@ -98,6 +98,7 @@ int test__open_syscall_event_on_all_cpus(void)
}
}
perf_evsel__free_counts(evsel);
out_close_fd:
perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
......
......@@ -96,7 +96,7 @@ int test__PERF_RECORD(void)
err = perf_evlist__prepare_workload(evlist, &opts, argv);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
goto out_delete_evlist;
goto out_delete_maps;
}
/*
......@@ -111,7 +111,7 @@ int test__PERF_RECORD(void)
err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
if (err < 0) {
pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_delete_maps;
}
cpu = err;
......@@ -121,7 +121,7 @@ int test__PERF_RECORD(void)
*/
if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
pr_debug("sched_setaffinity: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_delete_maps;
}
/*
......@@ -131,7 +131,7 @@ int test__PERF_RECORD(void)
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_delete_maps;
}
/*
......@@ -142,7 +142,7 @@ int test__PERF_RECORD(void)
err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_delete_maps;
}
/*
......@@ -305,6 +305,8 @@ int test__PERF_RECORD(void)
}
out_err:
perf_evlist__munmap(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
out_delete_evlist:
perf_evlist__delete(evlist);
out:
......
......@@ -44,7 +44,7 @@ int test__vmlinux_matches_kallsyms(void)
*/
if (machine__create_kernel_maps(&kallsyms) < 0) {
pr_debug("machine__create_kernel_maps ");
return -1;
goto out;
}
/*
......@@ -227,5 +227,7 @@ int test__vmlinux_matches_kallsyms(void)
map__fprintf(pos, stderr);
}
out:
machine__exit(&kallsyms);
machine__exit(&vmlinux);
return err;
}
......@@ -273,6 +273,8 @@ void ui_browser__hide(struct ui_browser *browser __maybe_unused)
{
pthread_mutex_lock(&ui__lock);
ui_helpline__pop();
free(browser->helpline);
browser->helpline = NULL;
pthread_mutex_unlock(&ui__lock);
}
......
......@@ -476,8 +476,10 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
}
}
if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
free(event);
return -ENOENT;
}
map = machine->vmlinux_maps[MAP__FUNCTION];
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
......
......@@ -640,6 +640,11 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
}
}
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
free(evsel->counts);
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
......@@ -659,6 +664,28 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}
static inline void compute_deltas(struct perf_evsel *evsel,
int cpu,
struct perf_counts_values *count)
{
struct perf_counts_values tmp;
if (!evsel->prev_raw_counts)
return;
if (cpu == -1) {
tmp = evsel->prev_raw_counts->aggr;
evsel->prev_raw_counts->aggr = *count;
} else {
tmp = evsel->prev_raw_counts->cpu[cpu];
evsel->prev_raw_counts->cpu[cpu] = *count;
}
count->val = count->val - tmp.val;
count->ena = count->ena - tmp.ena;
count->run = count->run - tmp.run;
}
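/*
 * Example (illustrative numbers): if prev_raw_counts->aggr.val was 1000
 * and the new reading is 1800, the stored previous becomes 1800 and the
 * reported count->val becomes 800, i.e. the delta since the last interval.
 * When prev_raw_counts was never allocated (interval mode off), counts are
 * left untouched and stay cumulative.
 */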
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
int cpu, int thread, bool scale)
{
......@@ -674,6 +701,8 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
return -errno;
compute_deltas(evsel, cpu, &count);
if (scale) {
if (count.run == 0)
count.val = 0;
......@@ -712,6 +741,8 @@ int __perf_evsel__read(struct perf_evsel *evsel,
}
}
compute_deltas(evsel, -1, aggr);
evsel->counts->scaled = 0;
if (scale) {
if (aggr->run == 0) {
......
......@@ -53,6 +53,7 @@ struct perf_evsel {
struct xyarray *sample_id;
u64 *id;
struct perf_counts *counts;
struct perf_counts *prev_raw_counts;
int idx;
u32 ids;
struct hists hists;
......@@ -116,6 +117,7 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__free_counts(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
......
......@@ -313,7 +313,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
if (is_kallsyms) {
if (symbol_conf.kptr_restrict) {
pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
return 0;
err = 0;
goto out_free;
}
realname = (char *) name;
} else
......@@ -954,6 +955,7 @@ static int write_topo_node(int fd, int node)
}
fclose(fp);
fp = NULL;
ret = do_write(fd, &mem_total, sizeof(u64));
if (ret)
......@@ -980,7 +982,8 @@ static int write_topo_node(int fd, int node)
ret = do_write_string(fd, buf);
done:
free(buf);
fclose(fp);
if (fp)
fclose(fp);
return ret;
}
......@@ -2921,16 +2924,22 @@ int perf_event__process_tracing_data(union perf_event *event,
session->repipe);
padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
if (readn(session->fd, buf, padding) < 0)
die("reading input file");
if (readn(session->fd, buf, padding) < 0) {
pr_err("%s: reading input file", __func__);
return -1;
}
if (session->repipe) {
int retw = write(STDOUT_FILENO, buf, padding);
if (retw <= 0 || retw != padding)
die("repiping tracing data padding");
if (retw <= 0 || retw != padding) {
pr_err("%s: repiping tracing data padding", __func__);
return -1;
}
}
if (size_read + padding != size)
die("tracing data size mismatch");
if (size_read + padding != size) {
pr_err("%s: tracing data size mismatch", __func__);
return -1;
}
perf_evlist__prepare_tracepoint_events(session->evlist,
session->pevent);
......
......@@ -11,6 +11,7 @@
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
#include <linux/string.h>
const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
......@@ -29,29 +30,29 @@ static inline int is_no_dso_memory(const char *filename)
!strcmp(filename, "[heap]");
}
void map__init(struct map *self, enum map_type type,
void map__init(struct map *map, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso)
{
self->type = type;
self->start = start;
self->end = end;
self->pgoff = pgoff;
self->dso = dso;
self->map_ip = map__map_ip;
self->unmap_ip = map__unmap_ip;
RB_CLEAR_NODE(&self->rb_node);
self->groups = NULL;
self->referenced = false;
self->erange_warned = false;
map->type = type;
map->start = start;
map->end = end;
map->pgoff = pgoff;
map->dso = dso;
map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip;
RB_CLEAR_NODE(&map->rb_node);
map->groups = NULL;
map->referenced = false;
map->erange_warned = false;
}
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
u64 pgoff, u32 pid, char *filename,
enum map_type type)
{
struct map *self = malloc(sizeof(*self));
struct map *map = malloc(sizeof(*map));
if (self != NULL) {
if (map != NULL) {
char newfilename[PATH_MAX];
struct dso *dso;
int anon, no_dso, vdso;
......@@ -74,10 +75,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
if (dso == NULL)
goto out_delete;
map__init(self, type, start, start + len, pgoff, dso);
map__init(map, type, start, start + len, pgoff, dso);
if (anon || no_dso) {
self->map_ip = self->unmap_ip = identity__map_ip;
map->map_ip = map->unmap_ip = identity__map_ip;
/*
* Set memory without DSO as loaded. All map__find_*
......@@ -85,12 +86,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
* unnecessary map__load warning.
*/
if (no_dso)
dso__set_loaded(dso, self->type);
dso__set_loaded(dso, map->type);
}
}
return self;
return map;
out_delete:
free(self);
free(map);
return NULL;
}
......@@ -113,48 +114,48 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
return map;
}
void map__delete(struct map *self)
void map__delete(struct map *map)
{
free(self);
free(map);
}
void map__fixup_start(struct map *self)
void map__fixup_start(struct map *map)
{
struct rb_root *symbols = &self->dso->symbols[self->type];
struct rb_root *symbols = &map->dso->symbols[map->type];
struct rb_node *nd = rb_first(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
self->start = sym->start;
map->start = sym->start;
}
}
void map__fixup_end(struct map *self)
void map__fixup_end(struct map *map)
{
struct rb_root *symbols = &self->dso->symbols[self->type];
struct rb_root *symbols = &map->dso->symbols[map->type];
struct rb_node *nd = rb_last(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
self->end = sym->end;
map->end = sym->end;
}
}
#define DSO__DELETED "(deleted)"
int map__load(struct map *self, symbol_filter_t filter)
int map__load(struct map *map, symbol_filter_t filter)
{
const char *name = self->dso->long_name;
const char *name = map->dso->long_name;
int nr;
if (dso__loaded(self->dso, self->type))
if (dso__loaded(map->dso, map->type))
return 0;
nr = dso__load(self->dso, self, filter);
nr = dso__load(map->dso, map, filter);
if (nr < 0) {
if (self->dso->has_build_id) {
if (map->dso->has_build_id) {
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
build_id__sprintf(self->dso->build_id,
sizeof(self->dso->build_id),
build_id__sprintf(map->dso->build_id,
sizeof(map->dso->build_id),
sbuild_id);
pr_warning("%s with build id %s not found",
name, sbuild_id);
......@@ -184,43 +185,36 @@ int map__load(struct map *self, symbol_filter_t filter)
* Only applies to the kernel, as its symtabs aren't relative like the
* module ones.
*/
if (self->dso->kernel)
map__reloc_vmlinux(self);
if (map->dso->kernel)
map__reloc_vmlinux(map);
return 0;
}
struct symbol *map__find_symbol(struct map *self, u64 addr,
struct symbol *map__find_symbol(struct map *map, u64 addr,
symbol_filter_t filter)
{
if (map__load(self, filter) < 0)
if (map__load(map, filter) < 0)
return NULL;
return dso__find_symbol(self->dso, self->type, addr);
return dso__find_symbol(map->dso, map->type, addr);
}
struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
symbol_filter_t filter)
{
if (map__load(self, filter) < 0)
if (map__load(map, filter) < 0)
return NULL;
if (!dso__sorted_by_name(self->dso, self->type))
dso__sort_by_name(self->dso, self->type);
if (!dso__sorted_by_name(map->dso, map->type))
dso__sort_by_name(map->dso, map->type);
return dso__find_symbol_by_name(self->dso, self->type, name);
return dso__find_symbol_by_name(map->dso, map->type, name);
}
struct map *map__clone(struct map *self)
struct map *map__clone(struct map *map)
{
struct map *map = malloc(sizeof(*self));
if (!map)
return NULL;
memcpy(map, self, sizeof(*self));
return map;
return memdup(map, sizeof(*map));
}
int map__overlap(struct map *l, struct map *r)
......@@ -237,10 +231,10 @@ int map__overlap(struct map *l, struct map *r)
return 0;
}
size_t map__fprintf(struct map *self, FILE *fp)
size_t map__fprintf(struct map *map, FILE *fp)
{
return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
self->start, self->end, self->pgoff, self->dso->name);
map->start, map->end, map->pgoff, map->dso->name);
}
size_t map__fprintf_dsoname(struct map *map, FILE *fp)
......@@ -528,9 +522,9 @@ static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
return ip - (s64)map->pgoff;
}
void map__reloc_vmlinux(struct map *self)
void map__reloc_vmlinux(struct map *map)
{
struct kmap *kmap = map__kmap(self);
struct kmap *kmap = map__kmap(map);
s64 reloc;
if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
......@@ -542,9 +536,9 @@ void map__reloc_vmlinux(struct map *self)
if (!reloc)
return;
self->map_ip = map__reloc_map_ip;
self->unmap_ip = map__reloc_unmap_ip;
self->pgoff = reloc;
map->map_ip = map__reloc_map_ip;
map->unmap_ip = map__reloc_unmap_ip;
map->pgoff = reloc;
}
void maps__insert(struct rb_root *maps, struct map *map)
......@@ -567,9 +561,9 @@ void maps__insert(struct rb_root *maps, struct map *map)
rb_insert_color(&map->rb_node, maps);
}
void maps__remove(struct rb_root *self, struct map *map)
void maps__remove(struct rb_root *maps, struct map *map)
{
rb_erase(&map->rb_node, self);
rb_erase(&map->rb_node, maps);
}
struct map *maps__find(struct rb_root *maps, u64 ip)
......
......@@ -57,9 +57,9 @@ struct map_groups {
struct machine *machine;
};
static inline struct kmap *map__kmap(struct map *self)
static inline struct kmap *map__kmap(struct map *map)
{
return (struct kmap *)(self + 1);
return (struct kmap *)(map + 1);
}
static inline u64 map__map_ip(struct map *map, u64 ip)
......@@ -85,27 +85,27 @@ struct symbol;
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
void map__init(struct map *self, enum map_type type,
void map__init(struct map *map, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso);
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
u64 pgoff, u32 pid, char *filename,
enum map_type type);
struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
void map__delete(struct map *self);
struct map *map__clone(struct map *self);
void map__delete(struct map *map);
struct map *map__clone(struct map *map);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *self, FILE *fp);
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
int map__load(struct map *self, symbol_filter_t filter);
struct symbol *map__find_symbol(struct map *self,
int map__load(struct map *map, symbol_filter_t filter);
struct symbol *map__find_symbol(struct map *map,
u64 addr, symbol_filter_t filter);
struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
symbol_filter_t filter);
void map__fixup_start(struct map *self);
void map__fixup_end(struct map *self);
void map__fixup_start(struct map *map);
void map__fixup_end(struct map *map);
void map__reloc_vmlinux(struct map *self);
void map__reloc_vmlinux(struct map *map);
size_t __map_groups__fprintf_maps(struct map_groups *mg,
enum map_type type, int verbose, FILE *fp);
......
......@@ -249,7 +249,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
size_t size,
unsigned int width __maybe_unused)
{
FILE *fp;
FILE *fp = NULL;
char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
size_t line_len;
......@@ -270,7 +270,6 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
if (getline(&path, &line_len, fp) < 0 || !line_len)
goto out_ip;
fclose(fp);
self->srcline = strdup(path);
if (self->srcline == NULL)
goto out_ip;
......@@ -280,8 +279,12 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
*nl = '\0';
path = self->srcline;
out_path:
if (fp)
pclose(fp);
return repsep_snprintf(bf, size, "%s", path);
out_ip:
if (fp)
pclose(fp);
return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
}
......
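The rule behind this change: a stream obtained from popen() must be released with pclose(), never fclose(). A minimal standalone sketch; the echo command is only a stand-in for the command pipe presumably opened earlier in this function:

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		/* hypothetical command, not taken from the patch */
		FILE *fp = popen("echo hello", "r");

		if (fp == NULL)
			return 1;

		if (fgets(line, sizeof(line), fp))
			fputs(line, stdout);

		/* streams from popen() must go through pclose(), not fclose() */
		return pclose(fp) == -1 ? 1 : 0;
	}

fclose() would close the stream but, unlike pclose(), never waits for the child process, which is what cppcheck flagged here.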
......@@ -35,11 +35,11 @@ struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
return NULL;
}
static void str_node__delete(struct str_node *self, bool dupstr)
static void str_node__delete(struct str_node *snode, bool dupstr)
{
if (dupstr)
free((void *)self->s);
free(self);
free((void *)snode->s);
free(snode);
}
static
......@@ -59,12 +59,12 @@ static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
return strcmp(snode->s, str);
}
int strlist__add(struct strlist *self, const char *new_entry)
int strlist__add(struct strlist *slist, const char *new_entry)
{
return rblist__add_node(&self->rblist, new_entry);
return rblist__add_node(&slist->rblist, new_entry);
}
int strlist__load(struct strlist *self, const char *filename)
int strlist__load(struct strlist *slist, const char *filename)
{
char entry[1024];
int err;
......@@ -80,7 +80,7 @@ int strlist__load(struct strlist *self, const char *filename)
continue;
entry[len - 1] = '\0';
err = strlist__add(self, entry);
err = strlist__add(slist, entry);
if (err != 0)
goto out;
}
......@@ -107,56 +107,56 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry)
return snode;
}
static int strlist__parse_list_entry(struct strlist *self, const char *s)
static int strlist__parse_list_entry(struct strlist *slist, const char *s)
{
if (strncmp(s, "file://", 7) == 0)
return strlist__load(self, s + 7);
return strlist__load(slist, s + 7);
return strlist__add(self, s);
return strlist__add(slist, s);
}
int strlist__parse_list(struct strlist *self, const char *s)
int strlist__parse_list(struct strlist *slist, const char *s)
{
char *sep;
int err;
while ((sep = strchr(s, ',')) != NULL) {
*sep = '\0';
err = strlist__parse_list_entry(self, s);
err = strlist__parse_list_entry(slist, s);
*sep = ',';
if (err != 0)
return err;
s = sep + 1;
}
return *s ? strlist__parse_list_entry(self, s) : 0;
return *s ? strlist__parse_list_entry(slist, s) : 0;
}
struct strlist *strlist__new(bool dupstr, const char *slist)
struct strlist *strlist__new(bool dupstr, const char *list)
{
struct strlist *self = malloc(sizeof(*self));
struct strlist *slist = malloc(sizeof(*slist));
if (self != NULL) {
rblist__init(&self->rblist);
self->rblist.node_cmp = strlist__node_cmp;
self->rblist.node_new = strlist__node_new;
self->rblist.node_delete = strlist__node_delete;
if (slist != NULL) {
rblist__init(&slist->rblist);
slist->rblist.node_cmp = strlist__node_cmp;
slist->rblist.node_new = strlist__node_new;
slist->rblist.node_delete = strlist__node_delete;
self->dupstr = dupstr;
if (slist && strlist__parse_list(self, slist) != 0)
slist->dupstr = dupstr;
if (slist && strlist__parse_list(slist, list) != 0)
goto out_error;
}
return self;
return slist;
out_error:
free(self);
free(slist);
return NULL;
}
void strlist__delete(struct strlist *self)
void strlist__delete(struct strlist *slist)
{
if (self != NULL)
rblist__delete(&self->rblist);
if (slist != NULL)
rblist__delete(&slist->rblist);
}
struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
......
......@@ -17,34 +17,34 @@ struct strlist {
};
struct strlist *strlist__new(bool dupstr, const char *slist);
void strlist__delete(struct strlist *self);
void strlist__delete(struct strlist *slist);
void strlist__remove(struct strlist *self, struct str_node *sn);
int strlist__load(struct strlist *self, const char *filename);
int strlist__add(struct strlist *self, const char *str);
void strlist__remove(struct strlist *slist, struct str_node *sn);
int strlist__load(struct strlist *slist, const char *filename);
int strlist__add(struct strlist *slist, const char *str);
struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
struct str_node *strlist__find(struct strlist *self, const char *entry);
struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
struct str_node *strlist__find(struct strlist *slist, const char *entry);
static inline bool strlist__has_entry(struct strlist *self, const char *entry)
static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
{
return strlist__find(self, entry) != NULL;
return strlist__find(slist, entry) != NULL;
}
static inline bool strlist__empty(const struct strlist *self)
static inline bool strlist__empty(const struct strlist *slist)
{
return rblist__empty(&self->rblist);
return rblist__empty(&slist->rblist);
}
static inline unsigned int strlist__nr_entries(const struct strlist *self)
static inline unsigned int strlist__nr_entries(const struct strlist *slist)
{
return rblist__nr_entries(&self->rblist);
return rblist__nr_entries(&slist->rblist);
}
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *self)
static inline struct str_node *strlist__first(struct strlist *slist)
{
struct rb_node *rn = rb_first(&self->rblist.entries);
struct rb_node *rn = rb_first(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
......@@ -59,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn)
/**
* strlist_for_each - iterate over a strlist
* @pos: the &struct str_node to use as a loop cursor.
* @self: the &struct strlist for loop.
* @slist: the &struct strlist for loop.
*/
#define strlist__for_each(pos, self) \
for (pos = strlist__first(self); pos; pos = strlist__next(pos))
#define strlist__for_each(pos, slist) \
for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
/**
* strlist_for_each_safe - iterate over a strlist safe against removal of
* str_node
* @pos: the &struct str_node to use as a loop cursor.
* @n: another &struct str_node to use as temporary storage.
* @self: the &struct strlist for loop.
* @slist: the &struct strlist for loop.
*/
#define strlist__for_each_safe(pos, n, self) \
for (pos = strlist__first(self), n = strlist__next(pos); pos;\
#define strlist__for_each_safe(pos, n, slist) \
for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
pos = n, n = strlist__next(n))
int strlist__parse_list(struct strlist *self, const char *s);
int strlist__parse_list(struct strlist *slist, const char *s);
#endif /* __PERF_STRLIST_H */