Commit 7d1b529f authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf cpumap: Add internal nr and cpu accessors

These accessors assume the map is non-null. Rewrite functions to use
them rather than direct accesses. This also fixes a build regression
for REFCNT_CHECKING in the intersect function.
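For reference, the pattern the patch introduces, condensed from the
diff below, is two internal helpers that centralize the
RC_CHK_ACCESS() dereference, with the public API keeping its NULL
check and delegating to them:

  /* Internal accessors: callers must guarantee 'cpus' is non-NULL. */
  static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
  {
          return RC_CHK_ACCESS(cpus)->map[idx];
  }

  static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
  {
          return RC_CHK_ACCESS(cpus)->nr;
  }

  /* The public API keeps the NULL check and delegates. */
  int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
  {
          return cpus ? __perf_cpu_map__nr(cpus) : 1;
  }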
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent caa90a7b
@@ -99,6 +99,11 @@ static int cmp_cpu(const void *a, const void *b)
 	return cpu_a->cpu - cpu_b->cpu;
 }
 
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+{
+	return RC_CHK_ACCESS(cpus)->map[idx];
+}
+
 static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
 {
 	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
@@ -111,8 +116,12 @@ static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu
 		/* Remove dups */
 		j = 0;
 		for (i = 0; i < nr_cpus; i++) {
-			if (i == 0 || RC_CHK_ACCESS(cpus)->map[i].cpu != RC_CHK_ACCESS(cpus)->map[i - 1].cpu)
-				RC_CHK_ACCESS(cpus)->map[j++].cpu = RC_CHK_ACCESS(cpus)->map[i].cpu;
+			if (i == 0 ||
+			    __perf_cpu_map__cpu(cpus, i).cpu !=
+			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
+				RC_CHK_ACCESS(cpus)->map[j++].cpu =
+					__perf_cpu_map__cpu(cpus, i).cpu;
+			}
 		}
 		perf_cpu_map__set_nr(cpus, j);
 		assert(j <= nr_cpus);
@@ -269,26 +278,31 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 	return cpus;
 }
 
+static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+{
+	return RC_CHK_ACCESS(cpus)->nr;
+}
+
 struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
 {
 	struct perf_cpu result = {
 		.cpu = -1
 	};
 
-	if (cpus && idx < RC_CHK_ACCESS(cpus)->nr)
-		return RC_CHK_ACCESS(cpus)->map[idx];
+	if (cpus && idx < __perf_cpu_map__nr(cpus))
+		return __perf_cpu_map__cpu(cpus, idx);
 
 	return result;
 }
 
 int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 {
-	return cpus ? RC_CHK_ACCESS(cpus)->nr : 1;
+	return cpus ? __perf_cpu_map__nr(cpus) : 1;
 }
 
 bool perf_cpu_map__empty(const struct perf_cpu_map *map)
 {
-	return map ? RC_CHK_ACCESS(map)->map[0].cpu == -1 : true;
+	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
 }
 
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
@@ -299,10 +313,10 @@ int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
 		return -1;
 
 	low = 0;
-	high = RC_CHK_ACCESS(cpus)->nr;
+	high = __perf_cpu_map__nr(cpus);
 	while (low < high) {
 		int idx = (low + high) / 2;
-		struct perf_cpu cpu_at_idx = RC_CHK_ACCESS(cpus)->map[idx];
+		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);
 
 		if (cpu_at_idx.cpu == cpu.cpu)
 			return idx;
@@ -328,7 +342,9 @@ struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
 	};
 
 	// cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
-	return RC_CHK_ACCESS(map)->nr > 0 ? RC_CHK_ACCESS(map)->map[RC_CHK_ACCESS(map)->nr - 1] : result;
+	return __perf_cpu_map__nr(map) > 0
+	       ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
+	       : result;
 }
 
 /** Is 'b' a subset of 'a'. */
@@ -336,15 +352,15 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
 {
 	if (a == b || !b)
 		return true;
-	if (!a || RC_CHK_ACCESS(b)->nr > RC_CHK_ACCESS(a)->nr)
+	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
 		return false;
 
-	for (int i = 0, j = 0; i < RC_CHK_ACCESS(a)->nr; i++) {
-		if (RC_CHK_ACCESS(a)->map[i].cpu > RC_CHK_ACCESS(b)->map[j].cpu)
+	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
 			return false;
-		if (RC_CHK_ACCESS(a)->map[i].cpu == RC_CHK_ACCESS(b)->map[j].cpu) {
+		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
 			j++;
-			if (j == RC_CHK_ACCESS(b)->nr)
+			if (j == __perf_cpu_map__nr(b))
 				return true;
 		}
 	}
@@ -374,27 +390,27 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
 		return perf_cpu_map__get(other);
 	}
 
-	tmp_len = RC_CHK_ACCESS(orig)->nr + RC_CHK_ACCESS(other)->nr;
+	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
 	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
 	if (!tmp_cpus)
 		return NULL;
 
 	/* Standard merge algorithm from wikipedia */
 	i = j = k = 0;
-	while (i < RC_CHK_ACCESS(orig)->nr && j < RC_CHK_ACCESS(other)->nr) {
-		if (RC_CHK_ACCESS(orig)->map[i].cpu <= RC_CHK_ACCESS(other)->map[j].cpu) {
-			if (RC_CHK_ACCESS(orig)->map[i].cpu == RC_CHK_ACCESS(other)->map[j].cpu)
+	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
+			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
 				j++;
-			tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
+			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
 		} else
-			tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
+			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
 	}
 
-	while (i < RC_CHK_ACCESS(orig)->nr)
-		tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
+	while (i < __perf_cpu_map__nr(orig))
+		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
 
-	while (j < RC_CHK_ACCESS(other)->nr)
-		tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
+	while (j < __perf_cpu_map__nr(other))
+		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
 
 	assert(k <= tmp_len);
 
 	merged = cpu_map__trim_new(k, tmp_cpus);
@@ -416,20 +432,20 @@ struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
 	if (perf_cpu_map__is_subset(orig, other))
 		return perf_cpu_map__get(other);
 
-	tmp_len = max(orig->nr, other->nr);
+	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
 	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
 	if (!tmp_cpus)
 		return NULL;
 
 	i = j = k = 0;
-	while (i < orig->nr && j < other->nr) {
-		if (orig->map[i].cpu < other->map[j].cpu)
+	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
 			i++;
-		else if (orig->map[i].cpu > other->map[j].cpu)
+		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
 			j++;
 		else {
 			j++;
-			tmp_cpus[k++] = orig->map[i++];
+			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
 		}
 	}
 	if (k)
...
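Why the intersect function failed to build with REFCNT_CHECKING: the
old code there used bare orig->nr and orig->map[i], but under that
option libperf's handle type wraps the real struct, so direct member
access no longer compiles and everything must go through
RC_CHK_ACCESS(), which the new helpers centralize. A simplified model
of the mechanism (illustrative only, not the actual definitions in
tools/lib/perf/include/internal/rc_check.h):

  /* Hypothetical sketch: the wrapped struct and its accessor macro. */
  struct original_perf_cpu_map { int nr; struct perf_cpu map[]; };
  struct perf_cpu_map { struct original_perf_cpu_map *orig; };
  #define RC_CHK_ACCESS(x) ((x)->orig)

  /* Now 'cpus->nr' is a compile error, while the helper still works: */
  static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
  {
          return RC_CHK_ACCESS(cpus)->nr;
  }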