Commit 4e814da0 authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

bpf: Allow locking bpf_spin_lock in allocated objects

Allow locking a bpf_spin_lock in an allocated object, in addition to the
already supported map value pointers. The handling is similar to that of
map values: preserve reg->id for PTR_TO_BTF_ID | MEM_ALLOC as well, and
adjust process_spin_lock to work with such registers and remember the id
in the verifier state.

Refactor the existing process_spin_lock to work with PTR_TO_BTF_ID |
MEM_ALLOC in addition to PTR_TO_MAP_VALUE. We also need to update
reg_may_point_to_spin_lock, which is used in mark_ptr_or_null_reg, to
preserve reg->id; that id is then stored in
env->cur_state->active_spin_lock to remember the currently held spin lock.

Also update the comment describing the bpf_spin_lock implementation
details to cover the PTR_TO_BTF_ID | MEM_ALLOC type.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20221118015614.2013203-9-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 865ce09a
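
For context, a minimal sketch (not part of this commit) of the kind of program the change enables: taking a bpf_spin_lock embedded in an object returned by bpf_obj_new() rather than in a map value. It assumes the bpf_obj_new()/bpf_obj_drop() kfuncs and the selftests' bpf_experimental.h header from elsewhere in this patch series; the type, field, and section names are illustrative.

	/* Sketch only: assumes bpf_obj_new()/bpf_obj_drop() from this series;
	 * 'struct node' and the program section are illustrative.
	 */
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include "bpf_experimental.h"

	struct node {
		struct bpf_spin_lock lock;	/* found via the type's btf_record */
		int data;
	};

	SEC("tc")
	int lock_allocated_object(void *ctx)
	{
		struct node *n;

		/* bpf_obj_new() returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL;
		 * the NULL check preserves reg->id so the verifier can tell which
		 * object's lock is held.
		 */
		n = bpf_obj_new(typeof(*n));
		if (!n)
			return 0;

		bpf_spin_lock(&n->lock);	/* now accepted on MEM_ALLOC pointers */
		n->data = 42;
		bpf_spin_unlock(&n->lock);

		bpf_obj_drop(n);
		return 0;
	}

	char _license[] SEC("license") = "GPL";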
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -336,6 +336,7 @@ const struct bpf_func_proto bpf_spin_lock_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_VOID,
 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
+	.arg1_btf_id	= BPF_PTR_POISON,
 };
 
 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
@@ -358,6 +359,7 @@ const struct bpf_func_proto bpf_spin_unlock_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_VOID,
 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
+	.arg1_btf_id	= BPF_PTR_POISON,
 };
 
 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
...
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -451,10 +451,24 @@ static bool reg_type_not_null(enum bpf_reg_type type)
 	       type == PTR_TO_SOCK_COMMON;
 }
 
+static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
+{
+	struct btf_record *rec = NULL;
+	struct btf_struct_meta *meta;
+
+	if (reg->type == PTR_TO_MAP_VALUE) {
+		rec = reg->map_ptr->record;
+	} else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
+		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
+		if (meta)
+			rec = meta->record;
+	}
+	return rec;
+}
+
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
-	return reg->type == PTR_TO_MAP_VALUE &&
-	       btf_record_has_field(reg->map_ptr->record, BPF_SPIN_LOCK);
+	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
 }
 
 static bool type_is_rdonly_mem(u32 type)
@@ -5564,23 +5578,26 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state
 }
 
 /* Implementation details:
- * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
+ * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
+ * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
  * Two bpf_map_lookups (even with the same key) will have different reg->id.
- * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
- * value_or_null->value transition, since the verifier only cares about
- * the range of access to valid map value pointer and doesn't care about actual
- * address of the map element.
+ * Two separate bpf_obj_new will also have different reg->id.
+ * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
+ * clears reg->id after value_or_null->value transition, since the verifier only
+ * cares about the range of access to valid map value pointer and doesn't care
+ * about actual address of the map element.
  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
  * reg->id > 0 after value_or_null->value transition. By doing so
  * two bpf_map_lookups will be considered two different pointers that
- * point to different bpf_spin_locks.
+ * point to different bpf_spin_locks. Likewise for pointers to allocated objects
+ * returned from bpf_obj_new.
  * The verifier allows taking only one bpf_spin_lock at a time to avoid
  * dead-locks.
  * Since only one bpf_spin_lock is allowed the checks are simpler than
  * reg_is_refcounted() logic. The verifier needs to remember only
  * one spin_lock instead of array of acquired_refs.
- * cur_state->active_spin_lock remembers which map value element got locked
- * and clears it after bpf_spin_unlock.
+ * cur_state->active_spin_lock remembers which map value element or allocated
+ * object got locked and clears it after bpf_spin_unlock.
  */
 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 			     bool is_lock)
@@ -5588,8 +5605,10 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
 	struct bpf_verifier_state *cur = env->cur_state;
 	bool is_const = tnum_is_const(reg->var_off);
-	struct bpf_map *map = reg->map_ptr;
 	u64 val = reg->var_off.value;
+	struct bpf_map *map = NULL;
+	struct btf *btf = NULL;
+	struct btf_record *rec;
 
 	if (!is_const) {
 		verbose(env,
@@ -5597,19 +5616,27 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 			regno);
 		return -EINVAL;
 	}
-	if (!map->btf) {
-		verbose(env,
-			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
-			map->name);
-		return -EINVAL;
+	if (reg->type == PTR_TO_MAP_VALUE) {
+		map = reg->map_ptr;
+		if (!map->btf) {
+			verbose(env,
+				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
+				map->name);
+			return -EINVAL;
+		}
+	} else {
+		btf = reg->btf;
 	}
-	if (!btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
-		verbose(env, "map '%s' has no valid bpf_spin_lock\n", map->name);
+
+	rec = reg_btf_record(reg);
+	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
+		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
+			map ? map->name : "kptr");
 		return -EINVAL;
 	}
-	if (map->record->spin_lock_off != val + reg->off) {
+	if (rec->spin_lock_off != val + reg->off) {
 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
-			val + reg->off, map->record->spin_lock_off);
+			val + reg->off, rec->spin_lock_off);
 		return -EINVAL;
 	}
 	if (is_lock) {
@@ -5815,13 +5842,19 @@ static const struct bpf_reg_types int_ptr_types = {
 	},
 };
 
+static const struct bpf_reg_types spin_lock_types = {
+	.types = {
+		PTR_TO_MAP_VALUE,
+		PTR_TO_BTF_ID | MEM_ALLOC,
+	}
+};
+
 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
-static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } };
 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
@@ -5946,6 +5979,11 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
 				return -EACCES;
 			}
 		}
+	} else if (type_is_alloc(reg->type)) {
+		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock) {
+			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
+			return -EFAULT;
+		}
 	}
 
 	return 0;
@@ -6062,7 +6100,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 		goto skip_type_check;
 
 	/* arg_btf_id and arg_size are in a union. */
-	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
+	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
+	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
 		arg_btf_id = fn->arg_btf_id[arg];
 
 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
@@ -6680,9 +6719,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
-		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
-			return false;
-
+		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
+			return !!fn->arg_btf_id[i];
+		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
+			return fn->arg_btf_id[i] == BPF_PTR_POISON;
 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
 		    /* arg_btf_id and arg_size are in a union. */
 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
...
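
To illustrate the reg->id discussion in the comment above, a hedged continuation of the earlier sketch (same assumptions): two bpf_obj_new() results keep distinct reg->id values, i.e. distinct lock identities, and since cur_state->active_spin_lock holds only one entry, the first lock must be released before the second is taken.

	/* Continuation of the sketch above; 'struct node' as defined there. */
	struct node *a = bpf_obj_new(typeof(*a));
	struct node *b = bpf_obj_new(typeof(*b));

	if (a && b) {
		bpf_spin_lock(&a->lock);
		a->data++;
		bpf_spin_unlock(&a->lock);	/* release 'a' first ... */

		bpf_spin_lock(&b->lock);	/* ... taking this lock before the
						 * unlock above would be rejected */
		b->data++;
		bpf_spin_unlock(&b->lock);
	}
	if (a)
		bpf_obj_drop(a);
	if (b)
		bpf_obj_drop(b);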