Commit 534e86bc authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

bpf: Add 'release on unlock' logic for bpf_list_push_{front,back}

This commit implements the delayed release logic for bpf_list_push_front
and bpf_list_push_back.

Once a node has been added to the list, its pointer changes to
PTR_UNTRUSTED. However, the reference to it is only released once the
lock protecting the list is unlocked. For such a PTR_TO_BTF_ID |
MEM_ALLOC pointer with PTR_UNTRUSTED set but an active ref_obj_id,
reading it is still permitted as long as the lock is held. Writing to
it is not allowed.

This allows read access to pushed items that we no longer own until we
release the lock guarding the list, allowing a little more flexibility
when working with these APIs.
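As a rough sketch of what this permits (hypothetical: struct foo, the
glock/ghead globals, and the private() macro follow the style of the BPF
selftests and are not part of this patch; bpf_experimental.h is the
selftests header providing bpf_obj_new() and bpf_list_push_front()):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"	/* bpf_obj_new(), bpf_list_push_front() */

/* Hypothetical node type for illustration */
struct foo {
	int data;
	struct bpf_list_node node;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(foo, node);

SEC("tc")
int push_then_read(void *ctx)
{
	struct foo *f;

	f = bpf_obj_new(typeof(*f));
	if (!f)
		return 0;
	f->data = 42;

	bpf_spin_lock(&glock);
	bpf_list_push_front(&ghead, &f->node);
	/* f is now PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED: reads stay
	 * legal while glock is held; the reference itself is released by
	 * the bpf_spin_unlock() below.
	 */
	if (f->data != 42)
		bpf_printk("unexpected value");
	bpf_spin_unlock(&glock);
	return 0;
}

char _license[] SEC("license") = "GPL";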

Note that enabling write support has fairly tricky interactions with
what happens inside the critical section. Just as an example, currently,
bpf_obj_drop is not permitted, but if it were, being able to write to
the PTR_UNTRUSTED pointer while the object gets released back to the
memory allocator would violate safety properties we wish to guarantee
(i.e. not crashing the kernel). The memory could be reused for a
different type in the BPF program or even in the kernel as it gets
eventually kfree'd.

Not enabling bpf_obj_drop inside the critical section would appear to
prevent all of the above, but that is more of an artificial limitation
right now. Since the write support is tangled with how we handle
potential aliasing of nodes inside the critical section that may or may
not be part of the list anymore, it has been deferred to a future patch.
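For illustration, reusing the hypothetical struct foo, glock and ghead
from the sketch above, a store through the pushed node is exactly the
kind of access that stays rejected:

SEC("tc")
int push_then_write(void *ctx)
{
	struct foo *f = bpf_obj_new(typeof(*f));

	if (!f)
		return 0;

	bpf_spin_lock(&glock);
	bpf_list_push_front(&ghead, &f->node);
	f->data = 13;	/* rejected: f is PTR_UNTRUSTED, no write access */
	bpf_spin_unlock(&glock);
	return 0;
}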
Acked-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20221118015614.2013203-18-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 8cab76ec
include/linux/bpf_verifier.h
@@ -223,6 +223,11 @@ struct bpf_reference_state {
 	 * exiting a callback function.
 	 */
 	int callback_ref;
+	/* Mark the reference state to release the registers sharing the same id
+	 * on bpf_spin_unlock (for nodes that we will lose ownership to but are
+	 * safe to access inside the critical section).
+	 */
+	bool release_on_unlock;
 };
 
 /* state of the program:
...
kernel/bpf/verifier.c
@@ -5652,7 +5652,9 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 		cur->active_lock.ptr = btf;
 		cur->active_lock.id = reg->id;
 	} else {
+		struct bpf_func_state *fstate = cur_func(env);
 		void *ptr;
+		int i;
 
 		if (map)
 			ptr = map;
@@ -5670,6 +5672,23 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 		}
 		cur->active_lock.ptr = NULL;
 		cur->active_lock.id = 0;
+
+		for (i = 0; i < fstate->acquired_refs; i++) {
+			int err;
+
+			/* Complain on error because this reference state cannot
+			 * be freed before this point, as bpf_spin_lock critical
+			 * section does not allow functions that release the
+			 * allocated object immediately.
+			 */
+			if (!fstate->refs[i].release_on_unlock)
+				continue;
+			err = release_reference(env, fstate->refs[i].id);
+			if (err) {
+				verbose(env, "failed to release release_on_unlock reference");
+				return err;
+			}
+		}
 	}
 	return 0;
 }
@@ -8260,6 +8279,42 @@ static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
+{
+	struct bpf_func_state *state = cur_func(env);
+	struct bpf_reg_state *reg;
+	int i;
+
+	/* bpf_spin_lock only allows calling list_push and list_pop, no BPF
+	 * subprogs, no global functions. This means that the references would
+	 * not be released inside the critical section but they may be added to
+	 * the reference state, and the acquired_refs are never copied out for a
+	 * different frame as BPF to BPF calls don't work in bpf_spin_lock
+	 * critical sections.
+	 */
+	if (!ref_obj_id) {
+		verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
+		return -EFAULT;
+	}
+	for (i = 0; i < state->acquired_refs; i++) {
+		if (state->refs[i].id == ref_obj_id) {
+			if (state->refs[i].release_on_unlock) {
+				verbose(env, "verifier internal error: expected false release_on_unlock");
+				return -EFAULT;
+			}
+			state->refs[i].release_on_unlock = true;
+			/* Now mark everyone sharing same ref_obj_id as untrusted */
+			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+				if (reg->ref_obj_id == ref_obj_id)
+					reg->type |= PTR_UNTRUSTED;
+			}));
+			return 0;
+		}
+	}
+	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
+	return -EFAULT;
+}
+
 /* Implementation details:
  *
  * Each register points to some region of memory, which we define as an
@@ -8433,7 +8488,8 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
 			btf_name_by_offset(field->list_head.btf, et->name_off));
 		return -EINVAL;
 	}
-	return 0;
+	/* Set arg#1 for expiration after unlock */
+	return ref_set_release_on_unlock(env, reg->ref_obj_id);
 }
 
 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
...
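Tying the hunks together: ref_set_release_on_unlock() marks the pushed
node's reference when the push kfunc is verified, and process_spin_lock()
releases every such reference when bpf_spin_unlock is verified.
Continuing with the hypothetical types from the sketches above, any use
of the node after the unlock is therefore rejected as well:

SEC("tc")
int use_after_unlock(void *ctx)
{
	struct foo *f = bpf_obj_new(typeof(*f));
	int data;

	if (!f)
		return 0;

	bpf_spin_lock(&glock);
	bpf_list_push_front(&ghead, &f->node);
	bpf_spin_unlock(&glock);	/* release_on_unlock reference released here */
	data = f->data;	/* rejected: the reference backing f is gone */
	return data;
}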