Commit 5ac72634 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf tools: Warn if no user requested CPUs match PMU's CPUs

Commit 1d3351e6 ("perf tools: Enable on a list of CPUs for hybrid") made
perf warn when a user requested CPU doesn't match the PMU of the given
event, but only for hybrid PMUs. Make the logic generic for all PMUs and
remove the hybrid-specific code.

For events not on a core PMU, warn if a requested CPU isn't present or is
offline. For events on a core PMU, warn if a requested CPU isn't within
the CPU map of that PMU.

For example on a 16 (0-15) CPU system:
```
$ perf stat -e imc_free_running/data_read/,cycles -C 16 true
WARNING: A requested CPU in '16' is not supported by PMU 'uncore_imc_free_running_1' (CPUs 0-15) for event 'imc_free_running/data_read/'
WARNING: A requested CPU in '16' is not supported by PMU 'uncore_imc_free_running_0' (CPUs 0-15) for event 'imc_free_running/data_read/'
WARNING: A requested CPU in '16' is not supported by PMU 'cpu' (CPUs 0-15) for event 'cycles'

 Performance counter stats for 'CPU(s) 16':

   <not supported> MiB  imc_free_running/data_read/
   <not supported>      cycles

       0.000575312 seconds time elapsed
```
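
The check behind these warnings reduces to a set comparison: intersect the
user requested CPUs with the CPUs the PMU can serve (the PMU's own CPU map
for core PMUs, all online CPUs otherwise) and warn if any requested CPU was
dropped. A minimal standalone sketch, assuming libperf's public cpumap API
(perf_cpu_map__new(), perf_cpu_map__intersect() and perf_cpu_map__equal(),
which the new helper below also relies on); the warn_unsupported() helper
and the hard-coded PMU name/mask are illustrative only:
```
#include <stdio.h>
#include <perf/cpumap.h>

/* Warn if any CPU in 'cpu_list' falls outside 'pmu_mask'. */
static void warn_unsupported(const char *pmu_name, const char *pmu_mask,
			     const char *cpu_list)
{
	struct perf_cpu_map *supported = perf_cpu_map__new(pmu_mask);
	struct perf_cpu_map *requested = perf_cpu_map__new(cpu_list);
	struct perf_cpu_map *common = perf_cpu_map__intersect(supported, requested);

	/* Equal means every requested CPU is covered by the PMU. */
	if (!perf_cpu_map__equal(common, requested))
		fprintf(stderr,
			"WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s)\n",
			cpu_list, pmu_name, pmu_mask);

	perf_cpu_map__put(common);
	perf_cpu_map__put(requested);
	perf_cpu_map__put(supported);
}

int main(void)
{
	warn_unsupported("cpu", "0-15", "16");	/* warns: CPU 16 is out of range */
	warn_unsupported("cpu", "0-15", "4-7");	/* silent: fully contained */
	return 0;
}
```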

Remove evlist__fix_hybrid_cpus, which previously produced the warnings,
and perf_pmu__cpus_match, which worked with evlist__fix_hybrid_cpus to
change CPU maps for hybrid CPUs. Neither is necessary any longer, as CPU
map propagation properly intersects user requested CPUs with the core
PMU's CPU map.
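
For example, if a core PMU's CPU map is 0-7 and the user requests CPUs 6-8,
propagation intersects the two down to 6-7 and the event silently runs on
that subset; the warning exists to make the dropped CPU visible. A small
sketch of just that intersection step, again assuming libperf's public
cpumap API from around this series:
```
#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	struct perf_cpu_map *pmu_cpus = perf_cpu_map__new("0-7"); /* core PMU's CPUs */
	struct perf_cpu_map *user = perf_cpu_map__new("6-8");	  /* e.g. -C 6-8 */
	struct perf_cpu_map *prop = perf_cpu_map__intersect(pmu_cpus, user);
	struct perf_cpu cpu;
	int idx;

	/*
	 * Prints "6 7": CPU 8 is dropped by propagation, which is
	 * exactly the situation the new warning reports.
	 */
	perf_cpu_map__for_each_cpu(cpu, idx, prop)
		printf("%d ", cpu.cpu);
	printf("\n");

	perf_cpu_map__put(prop);
	perf_cpu_map__put(user);
	perf_cpu_map__put(pmu_cpus);
	return 0;
}
```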
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-12-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 8ec984d5
tools/perf/builtin-record.c
@@ -4198,11 +4198,7 @@ int cmd_record(int argc, const char **argv)
 	/* Enable ignoring missing threads when -u/-p option is defined. */
 	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
 
-	if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) {
-		pr_err("failed to use cpu list %s\n",
-		       rec->opts.target.cpu_list);
-		goto out;
-	}
+	evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
 
 	if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
 		arch__add_leaf_frame_record_opts(&rec->opts);
tools/perf/builtin-stat.c
@@ -2725,10 +2725,7 @@ int cmd_stat(int argc, const char **argv)
 		}
 	}
 
-	if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
-		pr_err("failed to use cpu list %s\n", target.cpu_list);
-		goto out;
-	}
+	evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
 
 	if (evlist__create_maps(evsel_list, &target) < 0) {
 		if (target__has_task(&target)) {
tools/perf/util/evlist-hybrid.c
@@ -86,77 +86,3 @@ bool evlist__has_hybrid(struct evlist *evlist)
 
 	return false;
 }
-
-int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
-{
-	struct perf_cpu_map *cpus;
-	struct evsel *evsel, *tmp;
-	struct perf_pmu *pmu;
-	int ret, unmatched_count = 0, events_nr = 0;
-
-	if (!perf_pmu__has_hybrid() || !cpu_list)
-		return 0;
-
-	cpus = perf_cpu_map__new(cpu_list);
-	if (!cpus)
-		return -1;
-
-	/*
-	 * The evsels are created with hybrid pmu's cpus. But now we
-	 * need to check and adjust the cpus of evsel by cpu_list because
-	 * cpu_list may cause conflicts with cpus of evsel. For example,
-	 * cpus of evsel is cpu0-7, but the cpu_list is cpu6-8, we need
-	 * to adjust the cpus of evsel to cpu6-7. And then propatate maps
-	 * in evlist__create_maps().
-	 */
-	evlist__for_each_entry_safe(evlist, tmp, evsel) {
-		struct perf_cpu_map *matched_cpus, *unmatched_cpus;
-		char buf1[128], buf2[128];
-
-		pmu = perf_pmu__find_hybrid_pmu(evsel->pmu_name);
-		if (!pmu)
-			continue;
-
-		ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,
-					   &unmatched_cpus);
-		if (ret)
-			goto out;
-
-		events_nr++;
-
-		if (perf_cpu_map__nr(matched_cpus) > 0 &&
-		    (perf_cpu_map__nr(unmatched_cpus) > 0 ||
-		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
-		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
-			perf_cpu_map__put(evsel->core.cpus);
-			perf_cpu_map__put(evsel->core.own_cpus);
-			evsel->core.cpus = perf_cpu_map__get(matched_cpus);
-			evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);
-
-			if (perf_cpu_map__nr(unmatched_cpus) > 0) {
-				cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
-				pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
-					   buf1, pmu->name, evsel->name);
-			}
-		}
-
-		if (perf_cpu_map__nr(matched_cpus) == 0) {
-			evlist__remove(evlist, evsel);
-			evsel__delete(evsel);
-
-			cpu_map__snprint(cpus, buf1, sizeof(buf1));
-			cpu_map__snprint(pmu->cpus, buf2, sizeof(buf2));
-			pr_warning("WARNING: %s isn't a '%s', please use a CPU list in the '%s' range (%s)\n",
-				   buf1, pmu->name, pmu->name, buf2);
-			unmatched_count++;
-		}
-
-		perf_cpu_map__put(matched_cpus);
-		perf_cpu_map__put(unmatched_cpus);
-	}
-
-	if (events_nr)
-		ret = (unmatched_count == events_nr) ? -1 : 0;
-
-out:
-	perf_cpu_map__put(cpus);
-	return ret;
-}
tools/perf/util/evlist-hybrid.h
@@ -10,6 +10,5 @@
 int evlist__add_default_hybrid(struct evlist *evlist, bool precise);
 void evlist__warn_hybrid_group(struct evlist *evlist);
 bool evlist__has_hybrid(struct evlist *evlist);
-int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list);
 
 #endif /* __PERF_EVLIST_HYBRID_H */
tools/perf/util/evlist.c
@@ -2465,3 +2465,42 @@ void evlist__check_mem_load_aux(struct evlist *evlist)
 		}
 	}
 }
+
+/**
+ * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
+ * and warn if the user CPU list is inapplicable for the event's PMU's
+ * CPUs. Non-core PMUs list a CPU in sysfs, but this may be overwritten by a
+ * user requested CPU and so any online CPU is applicable. Core PMUs handle
+ * events on the CPUs in their list and otherwise the event isn't supported.
+ * @evlist: The list of events being checked.
+ * @cpu_list: The user provided list of CPUs.
+ */
+void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
+{
+	struct perf_cpu_map *user_requested_cpus;
+	struct evsel *pos;
+
+	if (!cpu_list)
+		return;
+
+	user_requested_cpus = perf_cpu_map__new(cpu_list);
+	if (!user_requested_cpus)
+		return;
+
+	evlist__for_each_entry(evlist, pos) {
+		struct perf_cpu_map *intersect, *to_test;
+		const struct perf_pmu *pmu = evsel__find_pmu(pos);
+
+		to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
+		intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
+		if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
+			char buf[128];
+
+			cpu_map__snprint(to_test, buf, sizeof(buf));
+			pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
+				   cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
+		}
+		perf_cpu_map__put(intersect);
+	}
+	perf_cpu_map__put(user_requested_cpus);
+}
tools/perf/util/evlist.h
@@ -447,4 +447,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
 int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
 void evlist__check_mem_load_aux(struct evlist *evlist);
+void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
+
 #endif /* __PERF_EVLIST_H */
tools/perf/util/pmu.c
@@ -2046,39 +2046,6 @@ int perf_pmu__match(char *pattern, char *name, char *tok)
 	return 0;
 }
 
-int perf_pmu__cpus_match(struct perf_pmu *pmu, struct perf_cpu_map *cpus,
-			 struct perf_cpu_map **mcpus_ptr,
-			 struct perf_cpu_map **ucpus_ptr)
-{
-	struct perf_cpu_map *pmu_cpus = pmu->cpus;
-	struct perf_cpu_map *matched_cpus, *unmatched_cpus;
-	struct perf_cpu cpu;
-	int i, matched_nr = 0, unmatched_nr = 0;
-
-	matched_cpus = perf_cpu_map__default_new();
-	if (!matched_cpus)
-		return -1;
-
-	unmatched_cpus = perf_cpu_map__default_new();
-	if (!unmatched_cpus) {
-		perf_cpu_map__put(matched_cpus);
-		return -1;
-	}
-
-	perf_cpu_map__for_each_cpu(cpu, i, cpus) {
-		if (!perf_cpu_map__has(pmu_cpus, cpu))
-			RC_CHK_ACCESS(unmatched_cpus)->map[unmatched_nr++] = cpu;
-		else
-			RC_CHK_ACCESS(matched_cpus)->map[matched_nr++] = cpu;
-	}
-
-	perf_cpu_map__set_nr(unmatched_cpus, unmatched_nr);
-	perf_cpu_map__set_nr(matched_cpus, matched_nr);
-	*mcpus_ptr = matched_cpus;
-	*ucpus_ptr = unmatched_cpus;
-	return 0;
-}
-
 double __weak perf_pmu__cpu_slots_per_cycle(void)
 {
 	return NAN;
tools/perf/util/pmu.h
@@ -265,10 +265,6 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
 bool perf_pmu__has_hybrid(void);
 int perf_pmu__match(char *pattern, char *name, char *tok);
 
-int perf_pmu__cpus_match(struct perf_pmu *pmu, struct perf_cpu_map *cpus,
-			 struct perf_cpu_map **mcpus_ptr,
-			 struct perf_cpu_map **ucpus_ptr);
-
 char *pmu_find_real_name(const char *name);
 char *pmu_find_alias_name(const char *name);
 double perf_pmu__cpu_slots_per_cycle(void);