Commit 1d6d2bea authored by Namhyung Kim, committed by Arnaldo Carvalho de Melo

perf stat: Add perf_stat_process_percore()

perf_stat_process_percore() aggregates the counts for an event per core
even when the aggr_mode is AGGR_NONE.  This is enabled only when the user
requested it on the command line.
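
For reference, per-core counting is requested with the existing 'percore'
event term; this invocation is illustrative only (based on the perf-stat
documentation) and is not part of this patch:

  $ perf stat -e cpu/event=0,umask=0x3,percore=1/ -a -A -I1000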

To handle that, it first keeps the counts per cpu.  It then sums the
counts that share the same core id into aggr->counts and writes the
aggregated values back to each cpu.

Later, per-core events will skip all but one CPU in each core unless the
--percore-show-thread option is given.  In that case, it can simply print
the stats of every cpu with the updated (per-core) values.
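
With --percore-show-thread, every hardware thread keeps its own output
line but shows the per-core sum; again an illustrative invocation based
on the perf-stat documentation:

  $ perf stat -e cpu/event=0,umask=0x3,percore=1/ --percore-show-thread -a -A -I1000
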
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221018020227.85905-17-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -488,6 +488,7 @@ static void process_counters(void)
 	}
 
 	perf_stat_merge_counters(&stat_config, evsel_list);
+	perf_stat_process_percore(&stat_config, evsel_list);
 }
 
 static void process_interval(void)
@@ -691,6 +691,77 @@ void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *ev
 		evsel__merge_stats(evsel, config);
 }
 
+static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
+{
+	struct perf_stat_evsel *ps = evsel->stats;
+	struct perf_counts_values counts = { 0, };
+	struct aggr_cpu_id id;
+	struct perf_cpu cpu;
+	int idx;
+
+	/* collect per-core counts */
+	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
+		struct perf_stat_aggr *aggr = &ps->aggr[idx];
+
+		id = aggr_cpu_id__core(cpu, NULL);
+		if (!aggr_cpu_id__equal(core_id, &id))
+			continue;
+
+		counts.val += aggr->counts.val;
+		counts.ena += aggr->counts.ena;
+		counts.run += aggr->counts.run;
+	}
+
+	/* update aggregated per-core counts for each CPU */
+	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
+		struct perf_stat_aggr *aggr = &ps->aggr[idx];
+
+		id = aggr_cpu_id__core(cpu, NULL);
+		if (!aggr_cpu_id__equal(core_id, &id))
+			continue;
+
+		aggr->counts.val = counts.val;
+		aggr->counts.ena = counts.ena;
+		aggr->counts.run = counts.run;
+
+		aggr->used = true;
+	}
+}
+
+/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
+static void evsel__process_percore(struct evsel *evsel)
+{
+	struct perf_stat_evsel *ps = evsel->stats;
+	struct aggr_cpu_id core_id;
+	struct perf_cpu cpu;
+	int idx;
+
+	if (!evsel->percore)
+		return;
+
+	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
+		struct perf_stat_aggr *aggr = &ps->aggr[idx];
+
+		if (aggr->used)
+			continue;
+
+		core_id = aggr_cpu_id__core(cpu, NULL);
+		evsel__update_percore_stats(evsel, &core_id);
+	}
+}
+
+/* process cpu stats on per-core events */
+void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	if (config->aggr_mode != AGGR_NONE)
+		return;
+
+	evlist__for_each_entry(evlist, evsel)
+		evsel__process_percore(evsel);
+}
+
 int perf_event__process_stat_event(struct perf_session *session,
 				   union perf_event *event)
 {
@@ -51,6 +51,8 @@ struct perf_stat_aggr {
 	int nr;
 	/* whether any entry has failed to read/process event */
 	bool failed;
+	/* to mark this data is processed already */
+	bool used;
 };
 
 /* per-evsel event stats */
@@ -281,6 +283,7 @@ void evlist__reset_aggr_stats(struct evlist *evlist);
 int perf_stat_process_counter(struct perf_stat_config *config,
 			      struct evsel *counter);
 void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist);
+void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist);
 
 struct perf_tool;
 union perf_event;
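
Below is a minimal standalone sketch of the same two-pass scheme the new
code uses: sum the counts of all CPUs that share a core id, then write
the core-wide totals back to every CPU slot and mark it used so each
core is only processed once.  This is not perf code; the types, names
and sample data are made up for illustration:

  /* two-pass per-core aggregation sketch; build with: cc -o percore percore.c */
  #include <stdbool.h>
  #include <stdio.h>

  struct counts { long val, ena, run; };

  struct cpu_slot {
          int core_id;            /* core this CPU belongs to */
          struct counts counts;   /* per-CPU counts in, per-core totals out */
          bool used;              /* set once this slot holds the core total */
  };

  /* like evsel__update_percore_stats(): aggregate one core, broadcast back */
  static void update_percore(struct cpu_slot *slots, int nr, int core_id)
  {
          struct counts sum = { 0, 0, 0 };
          int i;

          /* pass 1: collect counts from all CPUs on this core */
          for (i = 0; i < nr; i++) {
                  if (slots[i].core_id != core_id)
                          continue;
                  sum.val += slots[i].counts.val;
                  sum.ena += slots[i].counts.ena;
                  sum.run += slots[i].counts.run;
          }

          /* pass 2: write the aggregated values back to each CPU slot */
          for (i = 0; i < nr; i++) {
                  if (slots[i].core_id != core_id)
                          continue;
                  slots[i].counts = sum;
                  slots[i].used = true;
          }
  }

  /* like evsel__process_percore(): visit each core exactly once */
  static void process_percore(struct cpu_slot *slots, int nr)
  {
          int i;

          for (i = 0; i < nr; i++) {
                  if (slots[i].used)
                          continue;
                  update_percore(slots, nr, slots[i].core_id);
          }
  }

  int main(void)
  {
          /* SMT siblings: CPUs 0/2 on core 0, CPUs 1/3 on core 1 */
          struct cpu_slot slots[] = {
                  { .core_id = 0, .counts = { 100, 10, 10 } },
                  { .core_id = 1, .counts = { 200, 10, 10 } },
                  { .core_id = 0, .counts = { 300, 10, 10 } },
                  { .core_id = 1, .counts = { 400, 10, 10 } },
          };
          int i;

          process_percore(slots, 4);

          /* expect val=400 for core 0 slots and val=600 for core 1 slots */
          for (i = 0; i < 4; i++)
                  printf("cpu%d core%d val=%ld\n", i, slots[i].core_id,
                         slots[i].counts.val);
          return 0;
  }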