Commit 0a157124 authored by Ingo Molnar's avatar Ingo Molnar

Merge tag 'perf-core-for-mingo-5.1-20190220' of...

Merge tag 'perf-core-for-mingo-5.1-20190220' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf report:

  He Kuang:

  - Don't shadow inlined symbol with different addr range.

perf script:

  Jiri Olsa:

  - Allow +- operator to ask for -F to add/remove fields to
    the default set, for instance to ask for the removal of the
    'cpu' field in tracepoint events, adding 'period' to that
    kind of events, etc.

perf test:

  Thomas Richter:

  - Fix scheduler tracepoint signedness of COMM fields failure of
    'evsel-tp-sched' test on s390 and other arches.

  Tommi Rantala:

  - Skip trace+probe_vfs_getname.sh when 'perf trace' is not built.

perf trace:

  Arnaldo Carvalho de Melo:

  - Add initial BPF map dumper, initially just for the current, minimal
    needs of the augmented_raw_syscalls BPF example used to collect
    pointer args payloads that uses BPF maps for pid and syscall filtering,
    but will in time have features similar to 'perf stat' --interval-print,
    --interval-clear, ways to signal from a BPF event that a specific
    map (or range of that map) should be printed, optionally as a
    histogram, etc.

General:

  Jiri Olsa:

  - Add CPU and NUMA topologies classes for further reuse, fixing some
    issues in the process.

  - Fixup some warnings and debug levels.

  - Make rm_rf() remove single file, not just directories.

Documentation:

  Jonas Rabenstein:

  - Fix HEADER_CMDLINE description in perf.data documentation.

  - Fix documentation of the Flags section in perf.data.
Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents 9ed8f1a6 b4409ae1
...@@ -159,6 +159,12 @@ OPTIONS ...@@ -159,6 +159,12 @@ OPTIONS
the override, and the result of the above is that only S/W and H/W the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields. events are displayed with the given fields.
It's possible to add/remove fields only for specific event type:
-Fsw:-cpu,-period
removes cpu and period from software events.
For the 'wildcard' option if a user selected field is invalid for an For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is event type, a message is displayed to the user that the option is
ignored for that type. For example: ignored for that type. For example:
......
...@@ -210,6 +210,14 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs. ...@@ -210,6 +210,14 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
may happen, for instance, when a thread gets migrated to a different CPU may happen, for instance, when a thread gets migrated to a different CPU
while processing a syscall. while processing a syscall.
--map-dump::
Dump BPF maps setup by events passed via -e, for instance the augmented_raw_syscalls
living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
dumps just boolean map values and integer keys, in time this will print in hex
by default and use BTF when available, as well as use functions to do pretty
printing using the existing 'perf trace' syscall arg beautifiers to map integer
arguments to strings (pid to comm, syscall id to syscall name, etc).
PAGEFAULTS PAGEFAULTS
---------- ----------
......
...@@ -43,11 +43,10 @@ struct perf_file_section { ...@@ -43,11 +43,10 @@ struct perf_file_section {
Flags section: Flags section:
The header is followed by different optional headers, described by the bits set For each of the optional features a perf_file_section is placed after the data
in flags. Only headers for which the bit is set are included. Each header section if the feature bit is set in the perf_header flags bitset. The
consists of a perf_file_section located after the initial header. respective perf_file_section points to the data of the additional header and
The respective perf_file_section points to the data of the additional defines its size.
header and defines its size.
Some headers consist of strings, which are defined like this: Some headers consist of strings, which are defined like this:
...@@ -131,7 +130,7 @@ An uint64_t with the total memory in bytes. ...@@ -131,7 +130,7 @@ An uint64_t with the total memory in bytes.
HEADER_CMDLINE = 11, HEADER_CMDLINE = 11,
A perf_header_string with the perf command line used to collect the data. A perf_header_string_list with the perf arg-vector used to collect the data.
HEADER_EVENT_DESC = 12, HEADER_EVENT_DESC = 12,
......
...@@ -2560,6 +2560,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused, ...@@ -2560,6 +2560,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
pr_warning("Overriding previous field request for %s events.\n", pr_warning("Overriding previous field request for %s events.\n",
event_type(type)); event_type(type));
/* Don't override defaults for +- */
if (strchr(tok, '+') || strchr(tok, '-'))
goto parse;
output[type].fields = 0; output[type].fields = 0;
output[type].user_set = true; output[type].user_set = true;
output[type].wildcard_set = false; output[type].wildcard_set = false;
...@@ -2644,6 +2648,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused, ...@@ -2644,6 +2648,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
} }
if (change == REMOVE)
output[type].fields &= ~all_output_options[i].field;
else
output[type].fields |= all_output_options[i].field;
output[type].user_set = true; output[type].user_set = true;
output[type].wildcard_set = true; output[type].wildcard_set = true;
} }
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <traceevent/event-parse.h> #include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h> #include <api/fs/tracing_path.h>
#include <bpf/bpf.h> #include <bpf/bpf.h>
#include "util/bpf_map.h"
#include "builtin.h" #include "builtin.h"
#include "util/cgroup.h" #include "util/cgroup.h"
#include "util/color.h" #include "util/color.h"
...@@ -87,6 +88,9 @@ struct trace { ...@@ -87,6 +88,9 @@ struct trace {
*augmented; *augmented;
} events; } events;
} syscalls; } syscalls;
struct {
struct bpf_map *map;
} dump;
struct record_opts opts; struct record_opts opts;
struct perf_evlist *evlist; struct perf_evlist *evlist;
struct machine *host; struct machine *host;
...@@ -2997,6 +3001,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv) ...@@ -2997,6 +3001,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0) if (err < 0)
goto out_error_apply_filters; goto out_error_apply_filters;
if (trace->dump.map)
bpf_map__fprintf(trace->dump.map, trace->output);
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages); err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0) if (err < 0)
goto out_error_mmap; goto out_error_mmap;
...@@ -3686,6 +3693,7 @@ int cmd_trace(int argc, const char **argv) ...@@ -3686,6 +3693,7 @@ int cmd_trace(int argc, const char **argv)
.max_stack = UINT_MAX, .max_stack = UINT_MAX,
.max_events = ULONG_MAX, .max_events = ULONG_MAX,
}; };
const char *map_dump_str = NULL;
const char *output_name = NULL; const char *output_name = NULL;
const struct option trace_options[] = { const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event", OPT_CALLBACK('e', "event", &trace, "event",
...@@ -3718,6 +3726,9 @@ int cmd_trace(int argc, const char **argv) ...@@ -3718,6 +3726,9 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "duration", &trace, "float", OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms", "show only events with duration > N.M ms",
trace__set_duration), trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"), OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time, OPT_BOOLEAN('T', "time", &trace.full_time,
...@@ -3812,6 +3823,14 @@ int cmd_trace(int argc, const char **argv) ...@@ -3812,6 +3823,14 @@ int cmd_trace(int argc, const char **argv)
err = -1; err = -1;
if (map_dump_str) {
trace.dump.map = bpf__find_map_by_name(map_dump_str);
if (trace.dump.map == NULL) {
pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
goto out;
}
}
if (trace.trace_pgfaults) { if (trace.trace_pgfaults) {
trace.opts.sample_address = true; trace.opts.sample_address = true;
trace.opts.sample_time = true; trace.opts.sample_time = true;
......
...@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes ...@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1; return -1;
} }
if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1; ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
...@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes ...@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true)) if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1; ret = -1;
if (perf_evsel__test_field(evsel, "next_comm", 16, true)) if (perf_evsel__test_field(evsel, "next_comm", 16, false))
ret = -1; ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true)) if (perf_evsel__test_field(evsel, "next_pid", 4, true))
...@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes ...@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1; return -1;
} }
if (perf_evsel__test_field(evsel, "comm", 16, true)) if (perf_evsel__test_field(evsel, "comm", 16, false))
ret = -1; ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true)) if (perf_evsel__test_field(evsel, "pid", 4, true))
......
...@@ -4,3 +4,8 @@ skip_if_no_perf_probe() { ...@@ -4,3 +4,8 @@ skip_if_no_perf_probe() {
perf probe 2>&1 | grep -q 'is not a perf-command' && return 2 perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
return 0 return 0
} }
skip_if_no_perf_trace() {
perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
return 0
}
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
. $(dirname $0)/lib/probe.sh . $(dirname $0)/lib/probe.sh
skip_if_no_perf_probe || exit 2 skip_if_no_perf_probe || exit 2
skip_if_no_perf_trace || exit 2
. $(dirname $0)/lib/probe_vfs_getname.sh . $(dirname $0)/lib/probe_vfs_getname.sh
......
...@@ -69,6 +69,7 @@ perf-y += hist.o ...@@ -69,6 +69,7 @@ perf-y += hist.o
perf-y += util.o perf-y += util.o
perf-y += xyarray.o perf-y += xyarray.o
perf-y += cpumap.o perf-y += cpumap.o
perf-y += cputopo.o
perf-y += cgroup.o perf-y += cgroup.o
perf-y += target.o perf-y += target.o
perf-y += rblist.o perf-y += rblist.o
...@@ -114,6 +115,7 @@ perf-y += branch.o ...@@ -114,6 +115,7 @@ perf-y += branch.o
perf-y += mem2node.o perf-y += mem2node.o
perf-$(CONFIG_LIBBPF) += bpf-loader.o perf-$(CONFIG_LIBBPF) += bpf-loader.o
perf-$(CONFIG_LIBBPF) += bpf_map.o
perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o perf-$(CONFIG_LIBELF) += probe-file.o
......
...@@ -233,7 +233,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool, ...@@ -233,7 +233,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
err = 0; err = 0;
break; break;
} }
pr_debug("%s: can't get next program: %s%s", pr_debug("%s: can't get next program: %s%s\n",
__func__, strerror(errno), __func__, strerror(errno),
errno == EINVAL ? " -- kernel too old?" : ""); errno == EINVAL ? " -- kernel too old?" : "");
/* don't report error on old kernel or EPERM */ /* don't report error on old kernel or EPERM */
......
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#include "util/bpf_map.h"
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
/*
 * Report whether a BPF map definition describes a per-CPU map type,
 * i.e. one whose value is replicated once per possible CPU.
 */
static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
{
	switch (def->type) {
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		return true;
	default:
		return false;
	}
}
/*
 * Allocate a buffer big enough to hold one value read back from the map.
 * For per-CPU map types the kernel hands back one 8-byte-aligned copy of
 * the value per configured CPU, so size the buffer accordingly; otherwise
 * a single value_size buffer suffices.  Returns NULL on allocation failure.
 */
static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
{
	size_t sz = def->value_size;

	if (bpf_map_def__is_per_cpu(def))
		sz = round_up(sz, 8) * sysconf(_SC_NPROCESSORS_CONF);

	return malloc(sz);
}
/*
 * bpf_map__fprintf - dump a BPF map's (key, value) pairs to a stream.
 *
 * Minimal dumper: keys are printed as ints and values as booleans (only
 * 'true' entries and lookup errors are shown), matching the current needs
 * of the augmented_raw_syscalls pid/syscall filter maps.
 *
 * Returns the number of characters printed, or a negative value if the
 * map has no fd, its definition can't be obtained, or allocation fails.
 */
int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
{
	const struct bpf_map_def *def = bpf_map__def(map);
	void *prev_key = NULL, *key, *value;
	int fd = bpf_map__fd(map), err;
	int printed = 0;

	if (fd < 0)
		return fd;

	if (IS_ERR(def))
		return PTR_ERR(def);

	err = -ENOMEM;
	key = malloc(def->key_size);
	if (key == NULL)
		goto out;

	value = bpf_map_def__alloc_value(def);
	if (value == NULL)
		goto out_free_key;

	/*
	 * BUGFIX: the assignment must bind to the function call alone, with
	 * the comparison against zero outside it.  The original form
	 * (err = call == 0) stored the boolean comparison in err, so err was
	 * always 0 on loop exit and the "end of map" case below could never
	 * be translated into the printed byte count.
	 */
	while ((err = bpf_map_get_next_key(fd, prev_key, key)) == 0) {
		int intkey = *(int *)key;

		if (!bpf_map_lookup_elem(fd, key, value)) {
			bool boolval = *(bool *)value;
			if (boolval)
				printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
		} else {
			printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
		}

		prev_key = key;
	}

	/*
	 * Walking past the last key fails with ENOENT: that is the normal
	 * end-of-iteration condition, report the printed count as success.
	 * Accept both libbpf conventions: -errno return or -1 with errno set.
	 */
	if (err == -ENOENT || (err < 0 && errno == ENOENT))
		err = printed;

	free(value);
out_free_key:
	free(key);
out:
	return err;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
 * Minimal BPF map dumping interface for perf tools.  When libbpf is not
 * available the dumper degenerates to a no-op so callers need no #ifdefs.
 */
#ifndef __PERF_BPF_MAP_H
#define __PERF_BPF_MAP_H 1

#include <stdio.h>
#include <linux/compiler.h>

/* opaque libbpf map handle; only a pointer is needed here */
struct bpf_map;

#ifdef HAVE_LIBBPF_SUPPORT

/* Dump the map's (key, value) pairs to fp; returns chars printed or < 0 on error. */
int bpf_map__fprintf(struct bpf_map *map, FILE *fp);

#else

/* stub used when perf is built without libbpf: prints nothing */
static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
{
	return 0;
}

#endif // HAVE_LIBBPF_SUPPORT

#endif // __PERF_BPF_MAP_H
...@@ -681,7 +681,7 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size) ...@@ -681,7 +681,7 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
#undef COMMA #undef COMMA
pr_debug("cpumask list: %s\n", buf); pr_debug2("cpumask list: %s\n", buf);
return ret; return ret;
} }
......
// SPDX-License-Identifier: GPL-2.0
#include <sys/param.h>
#include <inttypes.h>
#include <api/fs/fs.h>
#include "cputopo.h"
#include "cpumap.h"
#include "util.h"
#include "env.h"
#define CORE_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define NODE_ONLINE_FMT \
"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
"%s/devices/system/node/node%d/meminfo"
#define NODE_CPULIST_FMT \
"%s/devices/system/node/node%d/cpulist"
/*
 * Read this CPU's core_siblings_list and thread_siblings_list from sysfs
 * and record each string in tp, deduplicating against strings already
 * collected for earlier CPUs.  Returns 0 if at least one of the two
 * sibling files was read successfully, -1 otherwise.
 */
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	/* strip the trailing newline so strings compare equal across CPUs */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* store the string only if no earlier CPU produced an identical one */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;	/* ownership of buf moves to tp */
		tp->core_sib++;
		buf = NULL;			/* force getline below to allocate afresh */
		len = 0;
	}
	ret = 0;

try_threads:
	scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;	/* ownership moves to tp, as above */
		tp->thread_sib++;
		/*
		 * NOTE(review): unlike the core path, len is not reset to 0
		 * here after buf is handed off.  glibc getline tolerates a
		 * NULL *lineptr with a stale *n, and no further getline runs
		 * in this function, but confirm this asymmetry is intended.
		 */
		buf = NULL;
	}
	ret = 0;
done:
	/* fp here is the thread_siblings stream (the core one was closed above) */
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}
/*
 * Free a cpu_topology and every sibling string it owns.
 * Safe to call with a NULL pointer.
 */
void cpu_topology__delete(struct cpu_topology *tp)
{
	u32 idx;

	if (tp == NULL)
		return;

	for (idx = 0; idx < tp->core_sib; idx++)
		zfree(&tp->core_siblings[idx]);

	for (idx = 0; idx < tp->thread_sib; idx++)
		zfree(&tp->thread_siblings[idx]);

	free(tp);
}
/*
 * Allocate and populate a cpu_topology covering every online CPU up to
 * the highest present CPU number.  The struct and its two sibling string
 * arrays are carved out of a single allocation.  Returns NULL on failure.
 */
struct cpu_topology *cpu_topology__new(void)
{
	struct cpu_topology *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	/* one block: the struct followed by two arrays of nr char* slots */
	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	/* NOTE: arithmetic on void* is a GCC extension; step past the struct */
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topology(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	/*
	 * ret is still -1 if calloc failed or no online CPU was processed
	 * (the loop never ran a successful iteration); either way the
	 * partially built topology is discarded and NULL is returned.
	 */
	if (ret) {
		cpu_topology__delete(tp);
		tp = NULL;
	}
	return tp;
}
/*
 * Fill one numa_topology_node for NUMA node nr: parse MemTotal/MemFree
 * from the node's meminfo file and read its cpulist string, whose
 * allocated buffer is handed over to node->cpus on success.
 * Returns 0 on success, -1 on failure.
 */
static int load_numa_node(struct numa_topology_node *node, int nr)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	int ret = -1;
	FILE *fp;
	u64 mem;

	node->node = (u32) nr;

	scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
		  sysfs__mountpoint(), nr);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto err;
		if (!strcmp(field, "MemTotal:"))
			node->mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			node->mem_free = mem;
		/* stop as soon as both values have been seen */
		if (node->mem_total && node->mem_free)
			break;
	}

	fclose(fp);
	fp = NULL;	/* keeps the err path from double-closing */

	scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
		  sysfs__mountpoint(), nr);

	fp = fopen(str, "r");
	if (!fp)
		goto err;	/* BUGFIX: was 'return -1', leaking buf from the meminfo pass */

	if (getline(&buf, &len, fp) <= 0)
		goto err;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* ownership of the getline buffer transfers to the node */
	node->cpus = buf;
	fclose(fp);
	return 0;

err:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}
/*
 * Build a numa_topology from sysfs: one entry per online NUMA node,
 * each carrying its meminfo totals and its cpulist string.
 * Returns NULL on any failure.
 */
struct numa_topology *numa_topology__new(void)
{
	struct numa_topology *tp = NULL;
	struct cpu_map *node_map = NULL;
	char path[MAXPATHLEN];
	char *line = NULL;
	size_t cap = 0;
	char *newline;
	u32 cnt, idx;
	FILE *file;

	scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
		  sysfs__mountpoint());

	file = fopen(path, "r");
	if (!file)
		return NULL;

	if (getline(&line, &cap, file) <= 0)
		goto out;

	newline = strchr(line, '\n');
	if (newline)
		*newline = '\0';

	/* the 'online' file is a cpulist-style string; parse it as a map */
	node_map = cpu_map__new(line);
	if (!node_map)
		goto out;

	cnt = (u32) node_map->nr;

	/* struct plus a flexible array of cnt node entries */
	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * cnt);
	if (!tp)
		goto out;

	tp->nr = cnt;

	for (idx = 0; idx < cnt; idx++) {
		if (load_numa_node(&tp->nodes[idx], node_map->map[idx])) {
			/* releases the nodes loaded so far */
			numa_topology__delete(tp);
			tp = NULL;
			break;
		}
	}

out:
	free(line);
	fclose(file);
	cpu_map__put(node_map);
	return tp;
}
/*
 * Free a numa_topology and the per-node cpu list strings it owns.
 * Now tolerates a NULL pointer, matching cpu_topology__delete(); the
 * original dereferenced tp->nr unconditionally and would crash if a
 * caller ever passed NULL on an error path.
 */
void numa_topology__delete(struct numa_topology *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->nr; i++)
		free(tp->nodes[i].cpus);

	free(tp);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * CPU and NUMA topology descriptions read from sysfs, shared by the
 * perf.data header writer and other users.
 */
#ifndef __PERF_CPUTOPO_H
#define __PERF_CPUTOPO_H

#include <linux/types.h>
#include "env.h"

struct cpu_topology {
	u32 core_sib;		/* number of distinct core-sibling strings */
	u32 thread_sib;		/* number of distinct thread-sibling strings */
	char **core_siblings;	/* deduplicated core_siblings_list strings, owned */
	char **thread_siblings;	/* deduplicated thread_siblings_list strings, owned */
};

struct numa_topology_node {
	char *cpus;		/* node's cpulist string, owned by the node */
	u32 node;		/* NUMA node number */
	u64 mem_total;		/* MemTotal from the node's meminfo */
	u64 mem_free;		/* MemFree from the node's meminfo */
};

struct numa_topology {
	u32 nr;					/* number of entries in nodes[] */
	struct numa_topology_node nodes[0];	/* legacy [0]-style flexible array */
};

/* constructors return NULL on failure; delete functions free all owned strings */
struct cpu_topology *cpu_topology__new(void);
void cpu_topology__delete(struct cpu_topology *tp);

struct numa_topology *numa_topology__new(void);
void numa_topology__delete(struct numa_topology *tp);

#endif /* __PERF_CPUTOPO_H */
...@@ -956,6 +956,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, ...@@ -956,6 +956,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0; attr->sample_freq = 0;
attr->sample_period = 0; attr->sample_period = 0;
attr->write_backward = 0; attr->write_backward = 0;
/*
* We don't get sample for slave events, we make them
* when delivering group leader sample. Set the slave
* event to follow the master sample_type to ease up
* report.
*/
attr->sample_type = leader->attr.sample_type;
} }
if (opts->no_samples) if (opts->no_samples)
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include "tool.h" #include "tool.h"
#include "time-utils.h" #include "time-utils.h"
#include "units.h" #include "units.h"
#include "cputopo.h"
#include "sane_ctype.h" #include "sane_ctype.h"
...@@ -557,158 +558,15 @@ static int write_cmdline(struct feat_fd *ff, ...@@ -557,158 +558,15 @@ static int write_cmdline(struct feat_fd *ff,
return 0; return 0;
} }
#define CORE_SIB_FMT \
"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
struct cpu_topo {
u32 core_sib;
u32 thread_sib;
char **core_siblings;
char **thread_siblings;
};
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
FILE *fp;
char filename[MAXPATHLEN];
char *buf = NULL, *p;
size_t len = 0;
ssize_t sret;
u32 i = 0;
int ret = -1;
sprintf(filename, CORE_SIB_FMT, cpu);
fp = fopen(filename, "r");
if (!fp)
goto try_threads;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_threads;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->core_sib; i++) {
if (!strcmp(buf, tp->core_siblings[i]))
break;
}
if (i == tp->core_sib) {
tp->core_siblings[i] = buf;
tp->core_sib++;
buf = NULL;
len = 0;
}
ret = 0;
try_threads:
sprintf(filename, THRD_SIB_FMT, cpu);
fp = fopen(filename, "r");
if (!fp)
goto done;
if (getline(&buf, &len, fp) <= 0)
goto done;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->thread_sib; i++) {
if (!strcmp(buf, tp->thread_siblings[i]))
break;
}
if (i == tp->thread_sib) {
tp->thread_siblings[i] = buf;
tp->thread_sib++;
buf = NULL;
}
ret = 0;
done:
if(fp)
fclose(fp);
free(buf);
return ret;
}
static void free_cpu_topo(struct cpu_topo *tp)
{
u32 i;
if (!tp)
return;
for (i = 0 ; i < tp->core_sib; i++)
zfree(&tp->core_siblings[i]);
for (i = 0 ; i < tp->thread_sib; i++)
zfree(&tp->thread_siblings[i]);
free(tp);
}
static struct cpu_topo *build_cpu_topology(void)
{
struct cpu_topo *tp = NULL;
void *addr;
u32 nr, i;
size_t sz;
long ncpus;
int ret = -1;
struct cpu_map *map;
ncpus = cpu__max_present_cpu();
/* build online CPU map */
map = cpu_map__new(NULL);
if (map == NULL) {
pr_debug("failed to get system cpumap\n");
return NULL;
}
nr = (u32)(ncpus & UINT_MAX);
sz = nr * sizeof(char *);
addr = calloc(1, sizeof(*tp) + 2 * sz);
if (!addr)
goto out_free;
tp = addr;
addr += sizeof(*tp);
tp->core_siblings = addr;
addr += sz;
tp->thread_siblings = addr;
for (i = 0; i < nr; i++) {
if (!cpu_map__has(map, i))
continue;
ret = build_cpu_topo(tp, i);
if (ret < 0)
break;
}
out_free:
cpu_map__put(map);
if (ret) {
free_cpu_topo(tp);
tp = NULL;
}
return tp;
}
static int write_cpu_topology(struct feat_fd *ff, static int write_cpu_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused) struct perf_evlist *evlist __maybe_unused)
{ {
struct cpu_topo *tp; struct cpu_topology *tp;
u32 i; u32 i;
int ret, j; int ret, j;
tp = build_cpu_topology(); tp = cpu_topology__new();
if (!tp) if (!tp)
return -1; return -1;
...@@ -746,7 +604,7 @@ static int write_cpu_topology(struct feat_fd *ff, ...@@ -746,7 +604,7 @@ static int write_cpu_topology(struct feat_fd *ff,
return ret; return ret;
} }
done: done:
free_cpu_topo(tp); cpu_topology__delete(tp);
return ret; return ret;
} }
...@@ -781,112 +639,45 @@ static int write_total_mem(struct feat_fd *ff, ...@@ -781,112 +639,45 @@ static int write_total_mem(struct feat_fd *ff,
return ret; return ret;
} }
static int write_topo_node(struct feat_fd *ff, int node)
{
char str[MAXPATHLEN];
char field[32];
char *buf = NULL, *p;
size_t len = 0;
FILE *fp;
u64 mem_total, mem_free, mem;
int ret = -1;
sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
fp = fopen(str, "r");
if (!fp)
return -1;
while (getline(&buf, &len, fp) > 0) {
/* skip over invalid lines */
if (!strchr(buf, ':'))
continue;
if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
goto done;
if (!strcmp(field, "MemTotal:"))
mem_total = mem;
if (!strcmp(field, "MemFree:"))
mem_free = mem;
}
fclose(fp);
fp = NULL;
ret = do_write(ff, &mem_total, sizeof(u64));
if (ret)
goto done;
ret = do_write(ff, &mem_free, sizeof(u64));
if (ret)
goto done;
ret = -1;
sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
fp = fopen(str, "r");
if (!fp)
goto done;
if (getline(&buf, &len, fp) <= 0)
goto done;
p = strchr(buf, '\n');
if (p)
*p = '\0';
ret = do_write_string(ff, buf);
done:
free(buf);
if (fp)
fclose(fp);
return ret;
}
static int write_numa_topology(struct feat_fd *ff, static int write_numa_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused) struct perf_evlist *evlist __maybe_unused)
{ {
char *buf = NULL; struct numa_topology *tp;
size_t len = 0;
FILE *fp;
struct cpu_map *node_map = NULL;
char *c;
u32 nr, i, j;
int ret = -1; int ret = -1;
u32 i;
fp = fopen("/sys/devices/system/node/online", "r"); tp = numa_topology__new();
if (!fp) if (!tp)
return -1; return -ENOMEM;
if (getline(&buf, &len, fp) <= 0)
goto done;
c = strchr(buf, '\n');
if (c)
*c = '\0';
node_map = cpu_map__new(buf); ret = do_write(ff, &tp->nr, sizeof(u32));
if (!node_map) if (ret < 0)
goto done; goto err;
nr = (u32)node_map->nr; for (i = 0; i < tp->nr; i++) {
struct numa_topology_node *n = &tp->nodes[i];
ret = do_write(ff, &nr, sizeof(nr)); ret = do_write(ff, &n->node, sizeof(u32));
if (ret < 0) if (ret < 0)
goto done; goto err;
for (i = 0; i < nr; i++) { ret = do_write(ff, &n->mem_total, sizeof(u64));
j = (u32)node_map->map[i]; if (ret)
ret = do_write(ff, &j, sizeof(j)); goto err;
if (ret < 0)
break;
ret = write_topo_node(ff, i); ret = do_write(ff, &n->mem_free, sizeof(u64));
if (ret)
goto err;
ret = do_write_string(ff, n->cpus);
if (ret < 0) if (ret < 0)
break; goto err;
} }
done:
free(buf); ret = 0;
fclose(fp);
cpu_map__put(node_map); err:
numa_topology__delete(tp);
return ret; return ret;
} }
......
...@@ -1202,6 +1202,13 @@ static int deliver_sample_value(struct perf_evlist *evlist, ...@@ -1202,6 +1202,13 @@ static int deliver_sample_value(struct perf_evlist *evlist,
return 0; return 0;
} }
/*
* There's no reason to deliver sample
* for zero period, bail out.
*/
if (!sample->period)
return 0;
return tool->sample(tool, event, sample, sid->evsel, machine); return tool->sample(tool, event, sample, sid->evsel, machine);
} }
......
...@@ -231,8 +231,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) ...@@ -231,8 +231,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (sym_l == sym_r) if (sym_l == sym_r)
return 0; return 0;
if (sym_l->inlined || sym_r->inlined) if (sym_l->inlined || sym_r->inlined) {
return strcmp(sym_l->name, sym_r->name); int ret = strcmp(sym_l->name, sym_r->name);
if (ret)
return ret;
if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
return 0;
}
if (sym_l->start != sym_r->start) if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start); return (int64_t)(sym_r->start - sym_l->start);
......
...@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso, ...@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
} else { } else {
/* create a fake symbol for the inline frame */ /* create a fake symbol for the inline frame */
inline_sym = symbol__new(base_sym ? base_sym->start : 0, inline_sym = symbol__new(base_sym ? base_sym->start : 0,
base_sym ? base_sym->end : 0, base_sym ? (base_sym->end - base_sym->start) : 0,
base_sym ? base_sym->binding : 0, base_sym ? base_sym->binding : 0,
base_sym ? base_sym->type : 0, base_sym ? base_sym->type : 0,
funcname); funcname);
......
...@@ -120,16 +120,26 @@ int mkdir_p(char *path, mode_t mode) ...@@ -120,16 +120,26 @@ int mkdir_p(char *path, mode_t mode)
int rm_rf(const char *path) int rm_rf(const char *path)
{ {
DIR *dir; DIR *dir;
int ret = 0; int ret;
struct dirent *d; struct dirent *d;
char namebuf[PATH_MAX]; char namebuf[PATH_MAX];
struct stat statbuf;
/* Do not fail if there's no file. */
ret = lstat(path, &statbuf);
if (ret)
return 0;
/* Try to remove any file we get. */
if (!(statbuf.st_mode & S_IFDIR))
return unlink(path);
/* We have directory in path. */
dir = opendir(path); dir = opendir(path);
if (dir == NULL) if (dir == NULL)
return 0; return -1;
while ((d = readdir(dir)) != NULL && !ret) { while ((d = readdir(dir)) != NULL && !ret) {
struct stat statbuf;
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment