Commit 38f01d8d authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

libperf: Add perf_cpu_map__get()/perf_cpu_map__put()

Moving the following functions:

  cpu_map__get()
  cpu_map__put()

to libperf with the following names (see the usage sketch below):

  perf_cpu_map__get()
  perf_cpu_map__put()
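
A minimal usage sketch of the moved API, assuming a caller linked against libperf with <perf/cpumap.h> on the include path; it uses only the symbols exported at this point in the series:

  /* Illustration only; assumes libperf and its <perf/cpumap.h> header are available. */
  #include <perf/cpumap.h>

  int main(void)
  {
          struct perf_cpu_map *cpus, *ref;

          /* dummy map: refcount starts at 1 */
          cpus = perf_cpu_map__dummy_new();
          if (!cpus)
                  return 1;

          /* get() bumps the refcount and returns the same map */
          ref = perf_cpu_map__get(cpus);

          perf_cpu_map__put(ref);   /* refcount back to 1 */
          perf_cpu_map__put(cpus);  /* refcount hits 0, map is freed */
          return 0;
  }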

Committer notes:

Added fixes for arm/arm64
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-31-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 397721e0
@@ -181,7 +181,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
err = 0;
out:
cpu_map__put(online_cpus);
perf_cpu_map__put(online_cpus);
return err;
}
@@ -517,7 +517,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
}
}
cpu_map__put(online_cpus);
perf_cpu_map__put(online_cpus);
return (CS_ETM_HEADER_SIZE +
(etmv4 * CS_ETMV4_PRIV_SIZE) +
@@ -679,7 +679,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
if (cpu_map__has(cpu_map, i))
cs_etm_get_metadata(i, &offset, itr, info);
cpu_map__put(online_cpus);
perf_cpu_map__put(online_cpus);
return 0;
}
@@ -27,7 +27,7 @@ char *get_cpuid_str(struct perf_pmu *pmu)
return NULL;
/* read midr from list of cpus mapped to this pmu */
cpus = cpu_map__get(pmu->cpus);
cpus = perf_cpu_map__get(pmu->cpus);
for (cpu = 0; cpu < cpus->nr; cpu++) {
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
sysfs, cpus->map[cpu]);
@@ -60,6 +60,6 @@ char *get_cpuid_str(struct perf_pmu *pmu)
buf = NULL;
}
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
return buf;
}
@@ -206,7 +206,7 @@ static int reset_tracing_cpu(void)
int ret;
ret = set_tracing_cpumask(cpumap);
cpu_map__put(cpumap);
perf_cpu_map__put(cpumap);
return ret;
}
@@ -933,8 +933,8 @@ static int perf_stat_init_aggr_mode(void)
static void perf_stat__exit_aggr_mode(void)
{
cpu_map__put(stat_config.aggr_map);
cpu_map__put(stat_config.cpus_aggr_map);
perf_cpu_map__put(stat_config.aggr_map);
perf_cpu_map__put(stat_config.cpus_aggr_map);
stat_config.aggr_map = NULL;
stat_config.cpus_aggr_map = NULL;
}
@@ -3,6 +3,8 @@
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
@@ -16,3 +18,25 @@ struct perf_cpu_map *perf_cpu_map__dummy_new(void)
return cpus;
}
static void cpu_map__delete(struct perf_cpu_map *map)
{
if (map) {
WARN_ONCE(refcount_read(&map->refcnt) != 0,
"cpu_map refcnt unbalanced\n");
free(map);
}
}
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
if (map)
refcount_inc(&map->refcnt);
return map;
}
void perf_cpu_map__put(struct perf_cpu_map *map)
{
if (map && refcount_dec_and_test(&map->refcnt))
cpu_map__delete(map);
}
@@ -7,5 +7,7 @@
struct perf_cpu_map;
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
#endif /* __LIBPERF_CPUMAP_H */
@@ -2,6 +2,8 @@ LIBPERF_0.0.1 {
global:
libperf_set_print;
perf_cpu_map__dummy_new;
perf_cpu_map__get;
perf_cpu_map__put;
local:
*;
};
@@ -21,7 +21,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
}
if (map)
cpu_map__put(map);
perf_cpu_map__put(map);
return bm;
}
@@ -655,7 +655,7 @@ static int do_test_code_reading(bool try_kcore)
* and will be freed by following perf_evlist__set_maps
* call. Getting reference to keep them alive.
*/
cpu_map__get(cpus);
perf_cpu_map__get(cpus);
thread_map__get(threads);
perf_evlist__set_maps(evlist, NULL, NULL);
evlist__delete(evlist);
@@ -705,7 +705,7 @@ static int do_test_code_reading(bool try_kcore)
if (evlist) {
evlist__delete(evlist);
} else {
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
thread_map__put(threads);
}
machine__delete_threads(machine);
@@ -39,7 +39,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong cpu", map->map[i] == i);
}
cpu_map__put(map);
perf_cpu_map__put(map);
return 0;
}
@@ -68,7 +68,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1);
TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256);
TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
cpu_map__put(map);
perf_cpu_map__put(map);
return 0;
}
@@ -83,7 +83,7 @@ int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __may
TEST_ASSERT_VAL("failed to synthesize map",
!perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
/* This one is better stored in cpu values. */
cpus = cpu_map__new("1,256");
@@ -91,7 +91,7 @@ int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __may
TEST_ASSERT_VAL("failed to synthesize map",
!perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
return 0;
}
@@ -132,7 +132,7 @@ static int attach__cpu_disabled(struct evlist *evlist)
return err;
}
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
return evsel__enable(evsel);
}
@@ -154,7 +154,7 @@ static int attach__cpu_enabled(struct evlist *evlist)
if (err == -EACCES)
return TEST_SKIP;
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
return err ? TEST_FAIL : TEST_OK;
}
@@ -73,7 +73,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1);
TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2);
TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3);
cpu_map__put(map);
perf_cpu_map__put(map);
return 0;
}
@@ -113,6 +113,6 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
cpu_map__put(evsel->own_cpus);
perf_cpu_map__put(evsel->own_cpus);
return 0;
}
@@ -149,7 +149,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
evlist__disable(evlist);
evlist__delete(evlist);
} else {
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
thread_map__put(threads);
}
@@ -32,7 +32,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
}
if (map)
cpu_map__put(map);
perf_cpu_map__put(map);
else
free(bm);
@@ -155,7 +155,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
cpus = NULL;
threads = NULL;
out_free_cpus:
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
out_free_threads:
thread_map__put(threads);
return err;
@@ -120,7 +120,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
out_evsel_delete:
evsel__delete(evsel);
out_cpu_map_delete:
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
out_thread_map_delete:
thread_map__put(threads);
return err;
@@ -125,7 +125,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
}
out_free_maps:
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
thread_map__put(threads);
out_delete_evlist:
evlist__delete(evlist);
@@ -569,7 +569,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
evlist__disable(evlist);
evlist__delete(evlist);
} else {
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
thread_map__put(threads);
}
@@ -135,7 +135,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
}
out_free_maps:
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
thread_map__put(threads);
out_delete_evlist:
evlist__delete(evlist);
@@ -133,7 +133,7 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
}
ret = check_cpu_topology(path, map);
cpu_map__put(map);
perf_cpu_map__put(map);
free_path:
unlink(path);
@@ -273,28 +273,6 @@ struct perf_cpu_map *cpu_map__empty_new(int nr)
return cpus;
}
static void cpu_map__delete(struct perf_cpu_map *map)
{
if (map) {
WARN_ONCE(refcount_read(&map->refcnt) != 0,
"cpu_map refcnt unbalanced\n");
free(map);
}
}
struct perf_cpu_map *cpu_map__get(struct perf_cpu_map *map)
{
if (map)
refcount_inc(&map->refcnt);
return map;
}
void cpu_map__put(struct perf_cpu_map *map)
{
if (map && refcount_dec_and_test(&map->refcnt))
cpu_map__delete(map);
}
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
char path[PATH_MAX];
@@ -29,9 +29,6 @@ int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep
int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep);
const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */
struct perf_cpu_map *cpu_map__get(struct perf_cpu_map *map);
void cpu_map__put(struct perf_cpu_map *map);
static inline int cpu_map__socket(struct perf_cpu_map *sock, int s)
{
if (!sock || s > sock->nr || s < 0)
@@ -219,7 +219,7 @@ struct cpu_topology *cpu_topology__new(void)
}
out_free:
cpu_map__put(map);
perf_cpu_map__put(map);
if (ret) {
cpu_topology__delete(tp);
tp = NULL;
@@ -335,7 +335,7 @@ struct numa_topology *numa_topology__new(void)
out:
free(buf);
fclose(fp);
cpu_map__put(node_map);
perf_cpu_map__put(node_map);
return tp;
}
@@ -179,7 +179,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->cpu);
for (i = 0; i < env->nr_numa_nodes; i++)
cpu_map__put(env->numa_nodes[i].map);
perf_cpu_map__put(env->numa_nodes[i].map);
zfree(&env->numa_nodes);
for (i = 0; i < env->caches_cnt; i++)
@@ -1403,7 +1403,7 @@ size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
else
ret += fprintf(fp, "failed to get cpumap from event\n");
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
return ret;
}
@@ -141,7 +141,7 @@ void evlist__delete(struct evlist *evlist)
perf_evlist__munmap(evlist);
evlist__close(evlist);
cpu_map__put(evlist->cpus);
perf_cpu_map__put(evlist->cpus);
thread_map__put(evlist->threads);
evlist->cpus = NULL;
evlist->threads = NULL;
@@ -158,11 +158,11 @@ static void __perf_evlist__propagate_maps(struct evlist *evlist,
* keep it, if there's no target cpu list defined.
*/
if (!evsel->own_cpus || evlist->has_user_cpus) {
cpu_map__put(evsel->cpus);
evsel->cpus = cpu_map__get(evlist->cpus);
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evlist->cpus);
} else if (evsel->cpus != evsel->own_cpus) {
cpu_map__put(evsel->cpus);
evsel->cpus = cpu_map__get(evsel->own_cpus);
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
}
thread_map__put(evsel->threads);
@@ -1115,8 +1115,8 @@ void perf_evlist__set_maps(struct evlist *evlist, struct perf_cpu_map *cpus,
* the caller to increase the reference count.
*/
if (cpus != evlist->cpus) {
cpu_map__put(evlist->cpus);
evlist->cpus = cpu_map__get(cpus);
perf_cpu_map__put(evlist->cpus);
evlist->cpus = perf_cpu_map__get(cpus);
}
if (threads != evlist->threads) {
@@ -1383,7 +1383,7 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist)
out:
return err;
out_put:
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
goto out;
}
@@ -1325,8 +1325,8 @@ void perf_evsel__exit(struct evsel *evsel)
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
cpu_map__put(evsel->cpus);
cpu_map__put(evsel->own_cpus);
perf_cpu_map__put(evsel->cpus);
perf_cpu_map__put(evsel->own_cpus);
thread_map__put(evsel->threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
@@ -332,8 +332,8 @@ __add_event(struct list_head *list, int *idx,
return NULL;
(*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
evsel->cpus = perf_cpu_map__get(cpus);
evsel->own_cpus = perf_cpu_map__get(cpus);
evsel->system_wide = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
@@ -626,7 +626,7 @@ static bool pmu_is_uncore(const char *name)
snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
cpus = __pmu_cpumask(path);
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
return !!cpus;
}
@@ -557,7 +557,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
cpu_map__put(pcpus->cpus);
perf_cpu_map__put(pcpus->cpus);
Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}
@@ -67,7 +67,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
if (!cpus)
return false;
cpu = cpus->map[0];
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
do {
ret = perf_do_probe_api(fn, cpu, try[i++]);
@@ -122,7 +122,7 @@ bool perf_can_record_cpu_wide(void)
if (!cpus)
return false;
cpu = cpus->map[0];
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
if (fd < 0)
@@ -278,7 +278,7 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
struct perf_cpu_map *cpus = cpu_map__new(NULL);
cpu = cpus ? cpus->map[0] : 0;
cpu_map__put(cpus);
perf_cpu_map__put(cpus);
} else {
cpu = evlist->cpus->map[0];
}
@@ -2310,7 +2310,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
err = 0;
out_delete_map:
cpu_map__put(map);
perf_cpu_map__put(map);
return err;
}
@@ -745,7 +745,7 @@ static int str_to_bitmap(char *s, cpumask_t *b)
set_bit(c, cpumask_bits(b));
}
cpu_map__put(m);
perf_cpu_map__put(m);
return ret;
}