Commit e16d2f1a authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: add support for bpf_spin_lock to cgroup local storage

Allow 'struct bpf_spin_lock' to reside inside cgroup local storage.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent d83525ca
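
As context for the diffs below, here is a minimal sketch (not part of this commit) of what the change enables on the program side: a cgroup-attached BPF program that serializes updates to a multi-field counter kept in cgroup local storage. The map name cgrp_storage, the value type struct counter, and the program itself are illustrative, and the BTF-defined map syntax assumes a libbpf newer than this commit; the value layout must be described with BTF so map_check_btf() can locate the bpf_spin_lock field. Note that bpf_get_local_storage() returns RET_PTR_TO_MAP_VALUE, which is why the verifier hunk below starts assigning an id to BPF_REG_0 for that return type when the map value contains a spin lock.

/* Illustrative example only -- not part of this commit. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct counter {
	struct bpf_spin_lock lock;	/* must be described via BTF */
	__u64 packets;
	__u64 bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, struct counter);
} cgrp_storage SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	/* Cgroup storage lookup cannot fail for the attached cgroup:
	 * the helper returns PTR_TO_MAP_VALUE, not PTR_TO_MAP_VALUE_OR_NULL. */
	struct counter *c = bpf_get_local_storage(&cgrp_storage, 0);

	/* Keep packets and bytes consistent with each other across CPUs. */
	bpf_spin_lock(&c->lock);
	c->packets++;
	c->bytes += skb->len;
	bpf_spin_unlock(&c->lock);

	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";
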
@@ -147,6 +147,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
 		return -ENOMEM;
 	memcpy(&new->data[0], value, map->value_size);
+	check_and_init_map_lock(map, new->data);
 	new = xchg(&storage->buf, new);
 	kfree_rcu(new, rcu);
@@ -483,6 +484,7 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
 		storage->buf = kmalloc_node(size, flags, map->numa_node);
 		if (!storage->buf)
 			goto enomem;
+		check_and_init_map_lock(map, storage->buf->data);
 	} else {
 		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
 		if (!storage->percpu_buf)
@@ -482,7 +482,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 	if (map_value_has_spin_lock(map)) {
 		if (map->map_type != BPF_MAP_TYPE_HASH &&
-		    map->map_type != BPF_MAP_TYPE_ARRAY)
+		    map->map_type != BPF_MAP_TYPE_ARRAY &&
+		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
 			return -ENOTSUPP;
 		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
 		    map->value_size) {
@@ -3089,6 +3089,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 		if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+			if (map_value_has_spin_lock(meta.map_ptr))
+				regs[BPF_REG_0].id = ++env->id_gen;
 		} else {
 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
 			regs[BPF_REG_0].id = ++env->id_gen;