Commit ec09a42a authored by Elena Reshetova, committed by Arnaldo Carvalho de Melo

perf cpumap: Convert cpu_map.refcnt from atomic_t to refcount_t

The refcount_t type and corresponding API should be used instead of atomic_t
when the variable is used as a reference counter.

This helps avoid accidental reference counter overflows that can lead to
use-after-free situations.
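
For reference, a minimal, illustrative sketch (not part of this patch) of the
get/put pattern the converted cpu_map code follows; struct obj and the
obj__new()/obj__get()/obj__put() names are hypothetical:

	#include <stdlib.h>
	#include <linux/refcount.h>	/* same refcount_t API used by the patch */

	struct obj {
		refcount_t refcnt;	/* replaces an atomic_t reference counter */
		int data;
	};

	static struct obj *obj__new(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (o)
			refcount_set(&o->refcnt, 1);	/* caller owns the first reference */
		return o;
	}

	static struct obj *obj__get(struct obj *o)
	{
		if (o)
			refcount_inc(&o->refcnt);	/* saturates instead of wrapping on overflow */
		return o;
	}

	static void obj__put(struct obj *o)
	{
		if (o && refcount_dec_and_test(&o->refcnt))	/* free only on the last put */
			free(o);
	}

Unlike plain atomic_t arithmetic, the refcount_* operations warn and saturate
on suspicious transitions such as incrementing from zero or overflowing, so a
refcounting bug becomes a warning rather than a silent use-after-free.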
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Windsor <dwindsor@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hans Liljestrand <ishkamiel@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: alsa-devel@alsa-project.org
Link: http://lkml.kernel.org/r/1487691303-31858-3-git-send-email-elena.reshetova@intel.com
[ fixed mixed conversion to refcount in tests/cpumap.c ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 79c5fe6d
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -66,7 +66,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
 	TEST_ASSERT_VAL("wrong nr", map->nr == 2);
 	TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1);
 	TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256);
-	TEST_ASSERT_VAL("wrong refcnt", atomic_read(&map->refcnt) == 1);
+	TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
 	cpu_map__put(map);
 	return 0;
 }
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -29,7 +29,7 @@ static struct cpu_map *cpu_map__default_new(void)
 			cpus->map[i] = i;
 
 		cpus->nr = nr_cpus;
-		atomic_set(&cpus->refcnt, 1);
+		refcount_set(&cpus->refcnt, 1);
 	}
 
 	return cpus;
@@ -43,7 +43,7 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
 	if (cpus != NULL) {
 		cpus->nr = nr_cpus;
 		memcpy(cpus->map, tmp_cpus, payload_size);
-		atomic_set(&cpus->refcnt, 1);
+		refcount_set(&cpus->refcnt, 1);
 	}
 
 	return cpus;
@@ -252,7 +252,7 @@ struct cpu_map *cpu_map__dummy_new(void)
 	if (cpus != NULL) {
 		cpus->nr = 1;
 		cpus->map[0] = -1;
-		atomic_set(&cpus->refcnt, 1);
+		refcount_set(&cpus->refcnt, 1);
 	}
 
 	return cpus;
@@ -269,7 +269,7 @@ struct cpu_map *cpu_map__empty_new(int nr)
 		for (i = 0; i < nr; i++)
 			cpus->map[i] = -1;
 
-		atomic_set(&cpus->refcnt, 1);
+		refcount_set(&cpus->refcnt, 1);
 	}
 
 	return cpus;
@@ -278,7 +278,7 @@ struct cpu_map *cpu_map__empty_new(int nr)
 static void cpu_map__delete(struct cpu_map *map)
 {
 	if (map) {
-		WARN_ONCE(atomic_read(&map->refcnt) != 0,
+		WARN_ONCE(refcount_read(&map->refcnt) != 0,
 			  "cpu_map refcnt unbalanced\n");
 		free(map);
 	}
@@ -287,13 +287,13 @@ static void cpu_map__delete(struct cpu_map *map)
 struct cpu_map *cpu_map__get(struct cpu_map *map)
 {
 	if (map)
-		atomic_inc(&map->refcnt);
+		refcount_inc(&map->refcnt);
 	return map;
 }
 
 void cpu_map__put(struct cpu_map *map)
 {
-	if (map && atomic_dec_and_test(&map->refcnt))
+	if (map && refcount_dec_and_test(&map->refcnt))
 		cpu_map__delete(map);
 }
 
@@ -357,7 +357,7 @@ int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
 	/* ensure we process id in increasing order */
 	qsort(c->map, c->nr, sizeof(int), cmp_ids);
 
-	atomic_set(&c->refcnt, 1);
+	refcount_set(&c->refcnt, 1);
 	*res = c;
 	return 0;
 }
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -3,13 +3,13 @@
 
 #include <stdio.h>
 #include <stdbool.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 
 #include "perf.h"
 #include "util/debug.h"
 
 struct cpu_map {
-	atomic_t refcnt;
+	refcount_t refcnt;
 	int nr;
 	int map[];
 };