Commit c1a515d3 authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v6.5-2-2023-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools

Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Fix segfault in the powerpc specific arch_skip_callchain_idx
   function. The patch doing the reference count init/exit that went
   into 6.5 missed this function.

 - Fix regression reading the arm64 PMU cpu slots in sysfs, a patch
   removing some code duplication ended up duplicating the /sysfs prefix
   for these files.

 - Fix grouping of events related to topdown, addressing a regression on
   the CSV output produced by 'perf stat' noticed on the downstream tool
   toplev.

 - Fix the uprobe_from_different_cu 'perf test' entry, it is failing
   when gcc isn't available, so we need to check that and skip the test
   if it is not installed.

* tag 'perf-tools-fixes-for-v6.5-2-2023-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools:
  perf test parse-events: Test complex name has required event format
  perf pmus: Create placholder regardless of scanning core_only
  perf test uprobe_from_different_cu: Skip if there is no gcc
  perf parse-events: Only move force grouped evsels when sorting
  perf parse-events: When fixing group leaders always set the leader
  perf parse-events: Extra care around force grouped events
  perf callchain powerpc: Fix addr location init during arch_skip_callchain_idx function
  perf pmu arm64: Fix reading the PMU cpu slots in sysfs
parents 638c1913 07d2b820
@@ -54,10 +54,11 @@ double perf_pmu__cpu_slots_per_cycle(void)
                 perf_pmu__pathname_scnprintf(path, sizeof(path),
                                              pmu->name, "caps/slots");
                 /*
-                 * The value of slots is not greater than 32 bits, but sysfs__read_int
-                 * can't read value with 0x prefix, so use sysfs__read_ull instead.
+                 * The value of slots is not greater than 32 bits, but
+                 * filename__read_int can't read value with 0x prefix,
+                 * so use filename__read_ull instead.
                  */
-                sysfs__read_ull(path, &slots);
+                filename__read_ull(path, &slots);
         }
 
         return slots ? (double)slots : NAN;

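The hunk above is the arm64 slots fix: perf_pmu__pathname_scnprintf() already expands the sysfs mount point into the path it builds, so reading the result with sysfs__read_ull(), which prepends the mount point a second time, produced a doubled prefix and the slots value was never read. filename__read_ull() takes the path verbatim. As a rough standalone illustration of that kind of read (not the perf helper itself; the PMU directory name in the example path is made up and varies by machine), it boils down to:

/*
 * Standalone sketch (not the perf helper itself): read an unsigned value
 * from an already-absolute sysfs path, roughly what filename__read_ull()
 * does.  strtoull() with base 0 accepts both "5" and "0x5", which matters
 * here because the caps/slots value can carry a 0x prefix (see the comment
 * in the hunk above).
 */
#include <stdio.h>
#include <stdlib.h>

static int read_ull(const char *path, unsigned long long *val)
{
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f)
                return -1;
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return -1;
        }
        fclose(f);
        *val = strtoull(buf, NULL, 0);  /* base 0: handles 0x-prefixed values */
        return 0;
}

int main(void)
{
        unsigned long long slots;
        /* Example path only; the PMU directory name varies by machine. */
        const char *path = "/sys/bus/event_source/devices/armv8_pmuv3_0/caps/slots";

        /*
         * The path already starts at the sysfs mount point; a helper that
         * prepends "/sys" again would look for "/sys/sys/..." and always
         * fail, which is what the fixed regression amounted to.
         */
        if (!read_ull(path, &slots))
                printf("slots: %llu\n", slots);
        return 0;
}
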
@@ -250,6 +250,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
         if (!chain || chain->nr < 3)
                 return skip_slot;
 
+        addr_location__init(&al);
         ip = chain->ips[1];
 
         thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
@@ -259,6 +260,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
 
         if (!dso) {
                 pr_debug("%" PRIx64 " dso is NULL\n", ip);
+                addr_location__exit(&al);
                 return skip_slot;
         }
 
@@ -279,5 +281,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
                  */
                 skip_slot = 3;
         }
+
+        addr_location__exit(&al);
         return skip_slot;
 }

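These hunks are the powerpc callchain fix: struct addr_location became reference counted during the 6.5 cycle, so every user has to pair addr_location__init() with addr_location__exit() on each return path, and arch_skip_callchain_idx() had been missed. The pattern being enforced, shown below as a generic sketch with invented names rather than the perf data structures, is to initialise once after the cheap early return and release on both the error path and the normal path:

/*
 * Generic sketch of the discipline the fix enforces, with invented names
 * (a malloc'd buffer stands in for the reference-counted addr_location):
 * initialise once after the cheap early return, then release on every
 * path that leaves the function afterwards.
 */
#include <stdlib.h>

struct scratch {
        char *buf;
};

static void scratch_init(struct scratch *s)
{
        s->buf = malloc(64);
}

static void scratch_exit(struct scratch *s)
{
        free(s->buf);
        s->buf = NULL;
}

static int skip_slots(const unsigned long *ips, int nr)
{
        struct scratch s;

        if (!ips || nr < 3)
                return 0;               /* nothing initialised yet */

        scratch_init(&s);

        if (!s.buf) {
                scratch_exit(&s);       /* early-error path must release too */
                return 0;
        }

        /* ... use s.buf to resolve ips[1] and decide how much to skip ... */

        scratch_exit(&s);               /* normal path releases before returning */
        return 3;
}

int main(void)
{
        unsigned long ips[4] = { 0, 1, 2, 3 };

        return skip_slots(ips, 4) == 3 ? 0 : 1;
}
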
@@ -1631,6 +1631,16 @@ static bool test__pmu_cpu_valid(void)
         return !!perf_pmus__find("cpu");
 }
 
+static bool test__pmu_cpu_event_valid(void)
+{
+        struct perf_pmu *pmu = perf_pmus__find("cpu");
+
+        if (!pmu)
+                return false;
+
+        return perf_pmu__has_format(pmu, "event");
+}
+
 static bool test__intel_pt_valid(void)
 {
         return !!perf_pmus__find("intel_pt");
@@ -2179,7 +2189,7 @@ static const struct evlist_test test__events_pmu[] = {
         },
         {
                 .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
-                .valid = test__pmu_cpu_valid,
+                .valid = test__pmu_cpu_event_valid,
                 .check = test__checkevent_complex_name,
                 /* 3 */
         },

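The test change above makes the complex-name parse test valid only when the "cpu" PMU actually exposes an "event" format term, using the new perf_pmu__has_format() helper added further down. Outside perf, a PMU's format terms are visible as files under /sys/bus/event_source/devices/<pmu>/format/, so a minimal standalone check (assuming sysfs is mounted at /sys) is simply:

/*
 * Standalone check, assuming sysfs is mounted at /sys: a PMU advertises a
 * format term (such as "event") as a file under its format/ directory, so
 * the question the new validity callback asks of the "cpu" PMU can be
 * answered by testing for that file.
 */
#include <stdio.h>
#include <unistd.h>

static int pmu_has_format(const char *pmu, const char *format)
{
        char path[256];

        snprintf(path, sizeof(path),
                 "/sys/bus/event_source/devices/%s/format/%s", pmu, format);
        return access(path, R_OK) == 0;
}

int main(void)
{
        printf("cpu PMU has an 'event' format: %s\n",
               pmu_has_format("cpu", "event") ? "yes" : "no");
        return 0;
}
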
@@ -4,6 +4,12 @@
 
 set -e
 
+# skip if there's no gcc
+if ! [ -x "$(command -v gcc)" ]; then
+        echo "failed: no gcc compiler"
+        exit 2
+fi
+
 temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
 
 cleanup()
@@ -11,7 +17,7 @@ cleanup()
         trap - EXIT TERM INT
         if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
                 echo "--- Cleaning up ---"
-                perf probe -x ${temp_dir}/testfile -d foo
+                perf probe -x ${temp_dir}/testfile -d foo || true
                 rm -f "${temp_dir}/"*
                 rmdir "${temp_dir}"
         fi

@@ -2100,16 +2100,16 @@ __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
         return lhs->core.idx - rhs->core.idx;
 }
 
-static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
+static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
 {
         const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
         const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
         const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
         const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
-        int *leader_idx = state;
-        int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
+        int *force_grouped_idx = _fg_idx;
+        int lhs_sort_idx, rhs_sort_idx, ret;
         const char *lhs_pmu_name, *rhs_pmu_name;
-        bool lhs_has_group = false, rhs_has_group = false;
+        bool lhs_has_group, rhs_has_group;
 
         /*
          * First sort by grouping/leader. Read the leader idx only if the evsel
@@ -2121,15 +2121,25 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
          */
         if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
                 lhs_has_group = true;
-                lhs_leader_idx = lhs_core->leader->idx;
+                lhs_sort_idx = lhs_core->leader->idx;
+        } else {
+                lhs_has_group = false;
+                lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
+                        ? *force_grouped_idx
+                        : lhs_core->idx;
         }
         if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
                 rhs_has_group = true;
-                rhs_leader_idx = rhs_core->leader->idx;
+                rhs_sort_idx = rhs_core->leader->idx;
+        } else {
+                rhs_has_group = false;
+                rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
+                        ? *force_grouped_idx
+                        : rhs_core->idx;
         }
 
-        if (lhs_leader_idx != rhs_leader_idx)
-                return lhs_leader_idx - rhs_leader_idx;
+        if (lhs_sort_idx != rhs_sort_idx)
+                return lhs_sort_idx - rhs_sort_idx;
 
         /* Group by PMU if there is a group. Groups can't span PMUs. */
         if (lhs_has_group && rhs_has_group) {
@@ -2146,10 +2156,10 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
 }
 
 static int parse_events__sort_events_and_fix_groups(struct list_head *list)
 {
-        int idx = 0, unsorted_idx = -1;
+        int idx = 0, force_grouped_idx = -1;
         struct evsel *pos, *cur_leader = NULL;
         struct perf_evsel *cur_leaders_grp = NULL;
-        bool idx_changed = false;
+        bool idx_changed = false, cur_leader_force_grouped = false;
         int orig_num_leaders = 0, num_leaders = 0;
         int ret;
@@ -2174,12 +2184,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                  */
                 pos->core.idx = idx++;
 
-                if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2)
-                        unsorted_idx = pos->core.idx;
+                /* Remember an index to sort all forced grouped events together to. */
+                if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
+                    arch_evsel__must_be_in_group(pos))
+                        force_grouped_idx = pos->core.idx;
         }
 
         /* Sort events. */
-        list_sort(&unsorted_idx, list, evlist__cmp);
+        list_sort(&force_grouped_idx, list, evlist__cmp);
 
         /*
          * Recompute groups, splitting for PMUs and adding groups for events
@@ -2189,8 +2201,9 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
         list_for_each_entry(pos, list, core.node) {
                 const struct evsel *pos_leader = evsel__leader(pos);
                 const char *pos_pmu_name = pos->group_pmu_name;
-                const char *cur_leader_pmu_name, *pos_leader_pmu_name;
-                bool force_grouped = arch_evsel__must_be_in_group(pos);
+                const char *cur_leader_pmu_name;
+                bool pos_force_grouped = force_grouped_idx != -1 &&
+                                         arch_evsel__must_be_in_group(pos);
 
                 /* Reset index and nr_members. */
                 if (pos->core.idx != idx)
@@ -2206,7 +2219,8 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                         cur_leader = pos;
 
                 cur_leader_pmu_name = cur_leader->group_pmu_name;
-                if ((cur_leaders_grp != pos->core.leader && !force_grouped) ||
+                if ((cur_leaders_grp != pos->core.leader &&
+                     (!pos_force_grouped || !cur_leader_force_grouped)) ||
                     strcmp(cur_leader_pmu_name, pos_pmu_name)) {
                         /* Event is for a different group/PMU than last. */
                         cur_leader = pos;
@@ -2216,14 +2230,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                          * group.
                          */
                         cur_leaders_grp = pos->core.leader;
-                }
-                pos_leader_pmu_name = pos_leader->group_pmu_name;
-                if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) {
                         /*
-                         * Event's PMU differs from its leader's. Groups can't
-                         * span PMUs, so update leader from the group/PMU
-                         * tracker.
+                         * Avoid forcing events into groups with events that
+                         * don't need to be in the group.
                          */
+                        cur_leader_force_grouped = pos_force_grouped;
+                }
+                if (pos_leader != cur_leader) {
+                        /* The leader changed so update it. */
                         evsel__set_leader(pos, cur_leader);
                 }
         }

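The parse-events hunks above revolve around the key fed to list_sort(): events in a group sort by their leader's index, loose events by their own index, except that loose events the architecture forces into a group (the Intel topdown events behind the 'perf stat' CSV regression) all borrow the single force_grouped_idx, so they land next to each other and can then be given one leader. Below is a toy standalone sketch of that keying idea; the event names and the "forced" flag are illustrative, and the real comparator additionally breaks ties by PMU name:

/*
 * Toy illustration of the sort key used above, with made-up event names:
 * events in a group sort by their leader's index, loose events by their
 * own index, and loose events that must be force-grouped all share one
 * index so they end up adjacent after sorting.
 */
#include <stdio.h>
#include <stdlib.h>

struct ev {
        const char *name;
        int idx;                /* original position */
        int leader_idx;         /* == idx when not in a group */
        int forced;             /* architecture says it must be grouped */
};

static int force_grouped_idx = -1;      /* index of the first loose forced event */

static int sort_idx(const struct ev *e)
{
        if (e->leader_idx != e->idx)            /* real group: follow the leader */
                return e->leader_idx;
        if (e->forced && force_grouped_idx != -1)
                return force_grouped_idx;       /* cluster all forced events */
        return e->idx;
}

static int cmp(const void *a, const void *b)
{
        const struct ev *l = a, *r = b;
        int d = sort_idx(l) - sort_idx(r);

        return d ? d : l->idx - r->idx;         /* deterministic tie break */
}

int main(void)
{
        struct ev evs[] = {
                { "slots",        0, 0, 1 },
                { "cycles",       1, 1, 0 },
                { "topdown-a",    2, 2, 1 },
                { "instructions", 3, 3, 0 },
                { "topdown-b",    4, 4, 1 },
        };
        int i, n = sizeof(evs) / sizeof(evs[0]);

        for (i = 0; i < n; i++) {
                if (force_grouped_idx == -1 && evs[i].forced &&
                    evs[i].leader_idx == evs[i].idx)
                        force_grouped_idx = evs[i].idx;
        }
        qsort(evs, n, sizeof(evs[0]), cmp);

        for (i = 0; i < n; i++)
                printf("%s\n", evs[i].name);    /* forced events now adjacent */
        return 0;
}
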
@@ -1440,6 +1440,17 @@ void perf_pmu__del_formats(struct list_head *formats)
         }
 }
 
+bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name)
+{
+        struct perf_pmu_format *format;
+
+        list_for_each_entry(format, &pmu->format, list) {
+                if (!strcmp(format->name, name))
+                        return true;
+        }
+        return false;
+}
+
 bool is_pmu_core(const char *name)
 {
         return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name);

@@ -234,6 +234,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
 void perf_pmu__set_format(unsigned long *bits, long from, long to);
 int perf_pmu__format_parse(int dirfd, struct list_head *head);
 void perf_pmu__del_formats(struct list_head *formats);
+bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name);
 
 bool is_pmu_core(const char *name);
 bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);

@@ -152,16 +152,14 @@ static void pmu_read_sysfs(bool core_only)
                 perf_pmu__find2(fd, dent->d_name);
         }
 
         closedir(dir);
-        if (core_only) {
-                if (!list_empty(&core_pmus))
-                        read_sysfs_core_pmus = true;
-                else {
-                        if (perf_pmu__create_placeholder_core_pmu(&core_pmus))
-                                read_sysfs_core_pmus = true;
-                }
-        } else {
+        if (list_empty(&core_pmus)) {
+                if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
+                        pr_err("Failure to set up any core PMUs\n");
+        }
+        if (!list_empty(&core_pmus)) {
                 read_sysfs_core_pmus = true;
-                read_sysfs_all_pmus = true;
+                if (!core_only)
+                        read_sysfs_all_pmus = true;
         }
 }

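The final hunk changes pmu_read_sysfs() so that an empty core-PMU list is repaired with a placeholder regardless of whether the scan was core-only, and the "already read" flags are only set once the list is actually populated. A minimal sketch of that shape, with invented names and a plain array standing in for the PMU lists:

/*
 * Shape of the fixed logic, with invented names and a plain array standing
 * in for the PMU lists: after scanning, backfill a placeholder so the core
 * list is never left empty, and only then mark the scan as complete.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_PMUS 8

static const char *core_pmus[MAX_PMUS];
static int nr_core_pmus;
static bool read_core_pmus, read_all_pmus;

static void scan_sysfs(bool core_only)
{
        /* ... fill core_pmus[] from sysfs; the scan may find nothing ... */

        if (nr_core_pmus == 0) {
                /* Backfill a placeholder so later lookups still succeed. */
                core_pmus[nr_core_pmus++] = "cpu (placeholder)";
        }
        if (nr_core_pmus > 0) {
                read_core_pmus = true;
                if (!core_only)
                        read_all_pmus = true;
        }
}

int main(void)
{
        scan_sysfs(/*core_only=*/true);
        printf("core PMUs: %d, core read: %d, all read: %d\n",
               nr_core_pmus, read_core_pmus, read_all_pmus);
        return 0;
}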