Commit e4730423 authored by Daniel Borkmann's avatar Daniel Borkmann Committed by Alexei Starovoitov

bpf: Fix cgroup local storage prog tracking

Recently noticed that we're tracking programs related to local storage maps
through their prog pointer. This is a wrong assumption since the prog pointer
can still change throughout the verification process, for example, whenever
bpf_patch_insn_single() is called.

Therefore, the prog pointer that was assigned via bpf_cgroup_storage_assign()
is not guaranteed to be the same as we pass in bpf_cgroup_storage_release()
and the map would therefore remain in busy state forever. Fix this by using
the prog's aux pointer which is stable throughout verification and beyond.

Fixes: de9cbbaa ("bpf: introduce cgroup storage maps")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/1471c69eca3022218666f909bc927a92388fd09e.1576580332.git.daniel@iogearbox.net
parent a2ea0746
...@@ -157,8 +157,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, ...@@ -157,8 +157,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
struct cgroup *cgroup, struct cgroup *cgroup,
enum bpf_attach_type type); enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
...@@ -360,9 +360,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, ...@@ -360,9 +360,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
static inline void bpf_cgroup_storage_set( static inline void bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; } struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
struct bpf_map *map) {} struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
......
...@@ -2043,8 +2043,7 @@ static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux) ...@@ -2043,8 +2043,7 @@ static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
for_each_cgroup_storage_type(stype) { for_each_cgroup_storage_type(stype) {
if (!aux->cgroup_storage[stype]) if (!aux->cgroup_storage[stype])
continue; continue;
bpf_cgroup_storage_release(aux->prog, bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
aux->cgroup_storage[stype]);
} }
} }
......
...@@ -20,7 +20,7 @@ struct bpf_cgroup_storage_map { ...@@ -20,7 +20,7 @@ struct bpf_cgroup_storage_map {
struct bpf_map map; struct bpf_map map;
spinlock_t lock; spinlock_t lock;
struct bpf_prog *prog; struct bpf_prog_aux *aux;
struct rb_root root; struct rb_root root;
struct list_head list; struct list_head list;
}; };
...@@ -420,7 +420,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = { ...@@ -420,7 +420,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
.map_seq_show_elem = cgroup_storage_seq_show_elem, .map_seq_show_elem = cgroup_storage_seq_show_elem,
}; };
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{ {
enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
struct bpf_cgroup_storage_map *map = map_to_storage(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map);
...@@ -428,14 +428,14 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) ...@@ -428,14 +428,14 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
spin_lock_bh(&map->lock); spin_lock_bh(&map->lock);
if (map->prog && map->prog != prog) if (map->aux && map->aux != aux)
goto unlock; goto unlock;
if (prog->aux->cgroup_storage[stype] && if (aux->cgroup_storage[stype] &&
prog->aux->cgroup_storage[stype] != _map) aux->cgroup_storage[stype] != _map)
goto unlock; goto unlock;
map->prog = prog; map->aux = aux;
prog->aux->cgroup_storage[stype] = _map; aux->cgroup_storage[stype] = _map;
ret = 0; ret = 0;
unlock: unlock:
spin_unlock_bh(&map->lock); spin_unlock_bh(&map->lock);
...@@ -443,16 +443,16 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) ...@@ -443,16 +443,16 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
return ret; return ret;
} }
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *_map)
{ {
enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
struct bpf_cgroup_storage_map *map = map_to_storage(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map);
spin_lock_bh(&map->lock); spin_lock_bh(&map->lock);
if (map->prog == prog) { if (map->aux == aux) {
WARN_ON(prog->aux->cgroup_storage[stype] != _map); WARN_ON(aux->cgroup_storage[stype] != _map);
map->prog = NULL; map->aux = NULL;
prog->aux->cgroup_storage[stype] = NULL; aux->cgroup_storage[stype] = NULL;
} }
spin_unlock_bh(&map->lock); spin_unlock_bh(&map->lock);
} }
......
...@@ -8268,7 +8268,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) ...@@ -8268,7 +8268,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
env->used_maps[env->used_map_cnt++] = map; env->used_maps[env->used_map_cnt++] = map;
if (bpf_map_is_cgroup_storage(map) && if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog, map)) { bpf_cgroup_storage_assign(env->prog->aux, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n"); verbose(env, "only one cgroup storage of each type is allowed\n");
fdput(f); fdput(f);
return -EBUSY; return -EBUSY;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment