Commit 59126901 authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v5.9-2020-09-03' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull more perf tools fixes from Arnaldo Carvalho de Melo:

 - Use uintptr_t when casting numbers to pointers

 - Keep output expected by 3rd parties: Turn off summary for interval
   mode by default.

 - BPF is in kernel space, so make sure do_validate_kcore_modules() knows
   about that.

 - Explicitly call out event modifiers in the documentation.

 - Fix 'perf jevents' allocation of space for regular expressions.

 - Address libtraceevent build warnings on 32-bit arches.

 - Fix checking of return values from functions that use ERR_PTR() in 'perf bench'.

* tag 'perf-tools-fixes-for-v5.9-2020-09-03' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf tools: Add bpf image check to __map__is_kmodule
  perf record/stat: Explicitly call out event modifiers in the documentation
  perf bench: The do_run_multi_threaded() function must use IS_ERR(perf_session__new())
  perf stat: Turn off summary for interval mode by default
  libtraceevent: Fix build warning on 32-bit arches
  perf jevents: Fix suspicious code in fixregex()
  perf parse-events: Use uintptr_t when casting numbers to pointers
parents 3e8d3bdc 830fadfd
......@@ -5259,7 +5259,7 @@ static int print_arg_pointer(struct trace_seq *s, const char *format, int plen,
default:
ret = 0;
val = eval_num_arg(data, size, event, arg);
trace_seq_printf(s, "%p", (void *)val);
trace_seq_printf(s, "%p", (void *)(intptr_t)val);
break;
}
......
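As a side note, here is a minimal standalone sketch (not part of the patch) of what the intermediate cast buys: on a 32-bit build a 64-bit value cast straight to a pointer triggers -Wint-to-pointer-cast, while narrowing through intptr_t first makes the truncation explicit and silences the warning.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long long val = 0x1234;	/* 64 bits wide even on 32-bit arches */

	/* printf("%p\n", (void *)val); would warn on 32-bit targets */
	printf("%p\n", (void *)(intptr_t)val);	/* explicit narrowing, no warning */
	return 0;
}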
......@@ -33,6 +33,10 @@ OPTIONS
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
- a symbolic or raw PMU event followed by an optional colon
and a list of event modifiers, e.g., cpu-cycles:p. See the
linkperf:perf-list[1] man page for details on event modifiers.
- a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
'param1', 'param2', etc are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*.
......
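For instance, with the modifier syntax called out above, an invocation along the lines of 'perf record -e cpu-cycles:u -- ./workload' restricts counting to user space, while ':k' restricts it to the kernel; the perf-list man page documents the full modifier table.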
......@@ -39,6 +39,10 @@ report::
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
- a symbolic or raw PMU event followed by an optional colon
and a list of event modifiers, e.g., cpu-cycles:p. See the
linkperf:perf-list[1] man page for details on event modifiers.
- a symbolically formed event like 'pmu/param1=0x3,param2/' where
param1 and param2 are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*
......@@ -416,6 +420,9 @@ counts for all hardware threads in a core but show the sum counts per
hardware thread. This is essentially a replacement for the any bit and
convenient for post processing.
--summary::
Print summary for interval mode (-I).
EXAMPLES
--------
......
......@@ -162,8 +162,8 @@ static int do_run_multi_threaded(struct target *target,
init_stats(&event_stats);
for (i = 0; i < multi_iterations; i++) {
session = perf_session__new(NULL, false, NULL);
if (!session)
return -ENOMEM;
if (IS_ERR(session))
return PTR_ERR(session);
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
......
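For readers unfamiliar with the convention, here is a simplified sketch of the ERR_PTR() helpers the fix relies on, modeled on include/linux/err.h rather than copied from it: perf_session__new() reports failure by encoding a negative errno in the returned pointer, so the result is non-NULL even on error and must be tested with IS_ERR()/PTR_ERR().

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* store the negative errno in the pointer */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;		/* recover the negative errno */
}

static inline int IS_ERR(const void *ptr)
{
	/* error pointers live in the top 4095 bytes of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}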
......@@ -404,7 +404,7 @@ static void read_counters(struct timespec *rs)
{
struct evsel *counter;
if (!stat_config.summary && (read_affinity_counters(rs) < 0))
if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0))
return;
evlist__for_each_entry(evsel_list, counter) {
......@@ -897,9 +897,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
if (stat_config.walltime_run_table)
stat_config.walltime_run[run_idx] = t1 - t0;
if (interval) {
if (interval && stat_config.summary) {
stat_config.interval = 0;
stat_config.summary = true;
stat_config.stop_read_counter = true;
init_stats(&walltime_nsecs_stats);
update_stats(&walltime_nsecs_stats, t1 - t0);
......@@ -1164,6 +1164,8 @@ static struct option stat_options[] = {
"Use with 'percore' event qualifier to show the event "
"counts of one hardware thread by sum up total hardware "
"threads of same physical core"),
OPT_BOOLEAN(0, "summary", &stat_config.summary,
"print summary for interval mode"),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
"libpfm4 event selector. use 'perf list' to list available events",
......
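With the summary now opt-in, interval output is unchanged by default and the end-of-run totals have to be requested explicitly, e.g. something along the lines of:

  perf stat -I 1000 --summary -- sleep 3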
......@@ -137,7 +137,7 @@ static char *fixregex(char *s)
return s;
/* allocate space for a new string */
fixed = (char *) malloc(len + 1);
fixed = (char *) malloc(len + esc_count + 1);
if (!fixed)
return NULL;
......
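A self-contained sketch (not the jevents code itself) of the allocation pattern the fix restores: when every escaped character grows the output by one byte, the destination buffer needs len + esc_count + 1 bytes, otherwise the copy loop writes past the end of the allocation.

#include <stdlib.h>
#include <string.h>

char *escape_backslashes(const char *s)
{
	size_t len = strlen(s), esc_count = 0;
	const char *p;
	char *fixed, *q;

	for (p = s; *p; p++)			/* count characters that need escaping */
		if (*p == '\\')
			esc_count++;

	fixed = malloc(len + esc_count + 1);	/* one extra byte per escape, plus NUL */
	if (!fixed)
		return NULL;

	for (p = s, q = fixed; *p; p++) {
		*q++ = *p;
		if (*p == '\\')
			*q++ = '\\';		/* emit the doubled backslash */
	}
	*q = '\0';
	return fixed;
}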
......@@ -736,12 +736,6 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
static int is_bpf_image(const char *name)
{
return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
}
static int machine__process_ksymbol_register(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
......
......@@ -267,6 +267,22 @@ bool __map__is_bpf_prog(const struct map *map)
return name && (strstr(name, "bpf_prog_") == name);
}
bool __map__is_bpf_image(const struct map *map)
{
const char *name;
if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
return true;
/*
* If PERF_RECORD_KSYMBOL is not included, the dso will not have
* type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
* guess the type based on name.
*/
name = map->dso->short_name;
return name && is_bpf_image(name);
}
bool __map__is_ool(const struct map *map)
{
return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
......
......@@ -147,12 +147,14 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
bool __map__is_bpf_prog(const struct map *map);
bool __map__is_bpf_image(const struct map *map);
bool __map__is_ool(const struct map *map);
static inline bool __map__is_kmodule(const struct map *map)
{
return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
!__map__is_bpf_prog(map) && !__map__is_ool(map);
!__map__is_bpf_prog(map) && !__map__is_ool(map) &&
!__map__is_bpf_image(map);
}
bool map__has_symbols(const struct map *map);
......@@ -164,4 +166,9 @@ static inline bool is_entry_trampoline(const char *name)
return !strcmp(name, ENTRY_TRAMPOLINE_NAME);
}
static inline bool is_bpf_image(const char *name)
{
return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
}
#endif /* __PERF_MAP_H */
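As an aside, a tiny standalone illustration (not from the patch) of the prefix test used by is_bpf_image(): sizeof on a string literal counts the terminating NUL, so sizeof("bpf_trampoline_") - 1 is exactly the prefix length strncmp() needs to compare.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "bpf_trampoline_12345";	/* hypothetical ksymbol name */

	/* sizeof("bpf_trampoline_") is 16 including the NUL, so the prefix length is 15 */
	if (strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0)
		printf("%s looks like a BPF image\n", name);
	return 0;
}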
......@@ -511,7 +511,7 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, $6, $4);
(void *)(uintptr_t) $2, $6, $4);
free($6);
if (err) {
free(list);
......@@ -528,7 +528,7 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, NULL, $4)) {
(void *)(uintptr_t) $2, NULL, $4)) {
free(list);
YYABORT;
}
......@@ -544,7 +544,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, $4, 0);
(void *)(uintptr_t) $2, $4, 0);
free($4);
if (err) {
free(list);
......@@ -561,7 +561,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, NULL, 0)) {
(void *)(uintptr_t) $2, NULL, 0)) {
free(list);
YYABORT;
}
......
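In the same spirit as the libtraceevent change above, a small hypothetical sketch (not from the patch) of why the parsed address is funneled through uintptr_t before being handed to a void * parameter: the value is 64 bits wide regardless of the architecture, so the direct cast would warn on 32-bit, and uintptr_t keeps both directions of the round trip explicit.

#include <stdint.h>
#include <stdio.h>

/* take_addr() is a made-up stand-in for a callee that receives the
 * address as an opaque pointer, like the breakpoint helper above. */
static void take_addr(void *ptr)
{
	uint64_t addr = (uint64_t)(uintptr_t)ptr;	/* recover the numeric value */

	printf("addr = %#llx\n", (unsigned long long)addr);
}

int main(void)
{
	uint64_t addr = 0x1000;

	/* (void *)addr alone would warn on 32-bit; uintptr_t narrows explicitly */
	take_addr((void *)(uintptr_t)addr);
	return 0;
}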
......@@ -113,6 +113,7 @@ struct perf_stat_config {
bool summary;
bool metric_no_group;
bool metric_no_merge;
bool stop_read_counter;
FILE *output;
unsigned int interval;
unsigned int timeout;
......