Commit 94f9eb95 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf pmus: Remove perf_pmus__has_hybrid

perf_pmus__has_hybrid was used to detect when there is more than one
core PMU. The same check can be made with perf_pmus__num_core_pmus,
which doesn't depend on is_pmu_hybrid or on PMU name comparisons. While
modifying the call sites, take the opportunity to improve comments, to
enable or simplify tests that previously failed on hybrid systems but
now pass, and to simplify generic code.
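
For context, a minimal sketch of the counting approach that replaces
the name-based check, assuming the memoized scan-core loop whose
opening lines are visible in the truncated pmus.c hunk below (the exact
committed body may differ):

	int perf_pmus__num_core_pmus(void)
	{
		static int count;	/* counted once, then cached */

		if (!count) {
			struct perf_pmu *pmu = NULL;

			/* Count every core PMU; no is_pmu_hybrid() or name checks. */
			while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
				count++;
		}
		return count;
	}

Call sites then substitute the old boolean mechanically:

	perf_pmus__has_hybrid()   ->  perf_pmus__num_core_pmus() > 1
	!perf_pmus__has_hybrid()  ->  perf_pmus__num_core_pmus() == 1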
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-34-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 002c4845
@@ -281,7 +281,7 @@ static int test_events(const struct evlist_test *events, int cnt)
 
 int test__hybrid(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
-	if (!perf_pmus__has_hybrid())
+	if (perf_pmus__num_core_pmus() == 1)
 		return TEST_SKIP;
 
 	return test_events(test__hybrid_events, ARRAY_SIZE(test__hybrid_events));
@@ -18,7 +18,7 @@ static int ___evlist__add_default_attrs(struct evlist *evlist,
 	for (i = 0; i < nr_attrs; i++)
 		event_attr_init(attrs + i);
 
-	if (!perf_pmus__has_hybrid())
+	if (perf_pmus__num_core_pmus() == 1)
 		return evlist__add_attrs(evlist, attrs, nr_attrs);
 
 	for (i = 0; i < nr_attrs; i++) {
@@ -292,7 +292,7 @@ uint64_t arch__intr_reg_mask(void)
 	 */
 	attr.sample_period = 1;
 
-	if (perf_pmus__has_hybrid()) {
+	if (perf_pmus__num_core_pmus() > 1) {
 		struct perf_pmu *pmu = NULL;
 		__u64 type = PERF_TYPE_RAW;
 
@@ -1294,7 +1294,7 @@ static int record__open(struct record *rec)
 	 * of waiting or event synthesis.
	 */
 	if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
-	    perf_pmus__has_hybrid()) {
+	    perf_pmus__num_core_pmus() > 1) {
 		pos = evlist__get_tracking_event(evlist);
 		if (!evsel__is_dummy_event(pos)) {
 			/* Set up dummy event. */
@@ -2193,7 +2193,7 @@ static void record__uniquify_name(struct record *rec)
 	char *new_name;
 	int ret;
 
-	if (!perf_pmus__has_hybrid())
+	if (perf_pmus__num_core_pmus() == 1)
 		return;
 
 	evlist__for_each_entry(evlist, pos) {
@@ -185,8 +185,15 @@ static int test__attr(struct test_suite *test __maybe_unused, int subtest __mayb
 	char path_dir[PATH_MAX];
 	char *exec_path;
 
-	if (perf_pmus__has_hybrid())
+	if (perf_pmus__num_core_pmus() > 1) {
+		/*
+		 * TODO: Attribute tests hard code the PMU type. If there are >1
+		 * core PMU then each PMU will have a different type which
+		 * requires additional support.
+		 */
+		pr_debug("Skip test on hybrid systems");
 		return TEST_SKIP;
+	}
 
 	/* First try development tree tests. */
 	if (!lstat("./tests", &st))
@@ -302,11 +302,8 @@ static int test__parse_metric(struct test_suite *test __maybe_unused, int subtes
 	TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);
 	TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);
 	TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0);
-
-	if (!perf_pmus__has_hybrid()) {
-		TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
-		TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
-	}
+	TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
+	TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
 	return 0;
 }
 
@@ -375,17 +375,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
 	cpu_clocks_evsel = evlist__last(evlist);
 
 	/* Second event */
-	if (perf_pmus__has_hybrid()) {
-		cycles = "cpu_core/cycles/u";
-		err = parse_event(evlist, cycles);
-		if (err) {
-			cycles = "cpu_atom/cycles/u";
-			pr_debug("Trying %s\n", cycles);
-			err = parse_event(evlist, cycles);
-		}
-	} else {
-		err = parse_event(evlist, cycles);
-	}
+	err = parse_event(evlist, cycles);
 	if (err) {
 		pr_debug("Failed to parse event %s\n", cycles);
 		goto out_err;
@@ -41,18 +41,8 @@ static int session_write_header(char *path)
 	session = perf_session__new(&data, NULL);
 	TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
 
-	if (!perf_pmus__has_hybrid()) {
-		session->evlist = evlist__new_default();
-		TEST_ASSERT_VAL("can't get evlist", session->evlist);
-	} else {
-		struct parse_events_error err;
-
-		session->evlist = evlist__new();
-		TEST_ASSERT_VAL("can't get evlist", session->evlist);
-		parse_events_error__init(&err);
-		parse_events(session->evlist, "cpu_core/cycles/", &err);
-		parse_events_error__exit(&err);
-	}
+	session->evlist = evlist__new_default();
+	TEST_ASSERT_VAL("can't get evlist", session->evlist);
 
 	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
 	perf_header__set_feat(&session->header, HEADER_NRCPUS);
@@ -472,15 +472,9 @@ struct hybrid_topology *hybrid_topology__new(void)
 {
 	struct perf_pmu *pmu = NULL;
 	struct hybrid_topology *tp = NULL;
-	u32 nr = 0, i = 0;
+	int nr = perf_pmus__num_core_pmus(), i = 0;
 
-	if (!perf_pmus__has_hybrid())
-		return NULL;
-
-	while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
-		nr++;
-
-	if (nr == 0)
+	if (nr <= 1)
 		return NULL;
 
 	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
@@ -3140,7 +3140,7 @@ void evsel__zero_per_pkg(struct evsel *evsel)
  */
 bool evsel__is_hybrid(const struct evsel *evsel)
 {
-	if (!perf_pmus__has_hybrid())
+	if (perf_pmus__num_core_pmus() == 1)
 		return false;
 
 	return evsel->core.is_pmu_core;
@@ -1605,7 +1605,7 @@ static int write_pmu_caps(struct feat_fd *ff,
 	 * Write hybrid pmu caps first to maintain compatibility with
 	 * older perf tool.
 	 */
-	if (perf_pmus__has_hybrid()) {
+	if (perf_pmus__num_core_pmus() > 1) {
 		pmu = NULL;
 		while ((pmu = perf_pmus__scan_core(pmu))) {
 			ret = __write_pmu_caps(ff, pmu, true);
@@ -121,6 +121,7 @@ int perf_mem_events__init(void)
 	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
 		struct perf_mem_event *e = perf_mem_events__ptr(j);
 		char sysfs_name[100];
+		struct perf_pmu *pmu = NULL;
 
 		/*
 		 * If the event entry isn't valid, skip initialization
@@ -129,18 +130,9 @@ int perf_mem_events__init(void)
 		if (!e->tag)
 			continue;
 
-		if (!perf_pmus__has_hybrid()) {
-			scnprintf(sysfs_name, sizeof(sysfs_name),
-				  e->sysfs_name, "cpu");
-			e->supported = perf_mem_event__supported(mnt, sysfs_name);
-		} else {
-			struct perf_pmu *pmu = NULL;
-
-			while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
-				scnprintf(sysfs_name, sizeof(sysfs_name),
-					  e->sysfs_name, pmu->name);
-				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
-			}
+		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+			scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
+			e->supported |= perf_mem_event__supported(mnt, sysfs_name);
 		}
 
 		if (e->supported)
@@ -196,7 +188,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
 		if (!e->record)
 			continue;
 
-		if (!perf_pmus__has_hybrid()) {
+		if (perf_pmus__num_core_pmus() == 1) {
 			if (!e->supported) {
 				pr_err("failed: event '%s' not supported\n",
 				       perf_mem_events__name(j, NULL));
@@ -274,7 +274,7 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids,
 	const char *metric_id;
 	struct evsel *ev;
 	size_t ids_size, matched_events, i;
-	bool all_pmus = !strcmp(pmu, "all") || !perf_pmus__has_hybrid() || !is_pmu_hybrid(pmu);
+	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);
 
 	*out_metric_events = NULL;
 	ids_size = hashmap__size(ids);
@@ -464,24 +464,6 @@ bool perf_pmus__have_event(const char *pname, const char *name)
 	return pmu && perf_pmu__have_event(pmu, name);
 }
 
-bool perf_pmus__has_hybrid(void)
-{
-	static bool hybrid_scanned, has_hybrid;
-
-	if (!hybrid_scanned) {
-		struct perf_pmu *pmu = NULL;
-
-		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
-			if (is_pmu_hybrid(pmu->name)) {
-				has_hybrid = true;
-				break;
-			}
-		}
-		hybrid_scanned = true;
-	}
-	return has_hybrid;
-}
-
 int perf_pmus__num_core_pmus(void)
 {
 	static int count;
@@ -18,7 +18,6 @@ const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
 int perf_pmus__num_mem_pmus(void);
 void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state);
 bool perf_pmus__have_event(const char *pname, const char *name);
-bool perf_pmus__has_hybrid(void);
 int perf_pmus__num_core_pmus(void);
 
 #endif /* __PMUS_H */
@@ -103,9 +103,9 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
 	return EOF;
 }
 
-bool perf_pmus__has_hybrid(void)
+int perf_pmus__num_core_pmus(void)
 {
-	return false;
+	return 1;
 }
 
 bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
@@ -696,7 +696,7 @@ static bool evlist__has_hybrid(struct evlist *evlist)
 {
 	struct evsel *evsel;
 
-	if (!perf_pmus__has_hybrid())
+	if (perf_pmus__num_core_pmus() == 1)
 		return false;
 
 	evlist__for_each_entry(evlist, evsel) {