Commit 81f1d7a5 authored by Benjamin Tissoires, committed by Alexei Starovoitov

bpf: wq: add bpf_wq_set_callback_impl

To support sleepable async callbacks, we need to tell push_async_cb()
whether the cb is sleepable or not.

The verifier now detects that we are in bpf_wq_set_callback_impl and
can allow a sleepable callback to happen.
Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent e3d9eac9
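
For context, a minimal BPF-side sketch of how this kfunc is meant to be used. The `bpf_wq_set_callback()` wrapper name and the map layout below are assumptions based on the rest of this series, not part of this commit:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"	/* assumed home of bpf_wq_init()/bpf_wq_set_callback() */

struct elem {
	struct bpf_wq w;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

/* Runs later from a workqueue, i.e. in a sleepable context, which is
 * what this patch teaches the verifier to allow.
 */
static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *wq)
{
	return 0;
}

SEC("tc")
int schedule_work(struct __sk_buff *skb)
{
	struct elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&array, &key);
	if (!val)
		return 0;

	if (bpf_wq_init(&val->w, &array, 0))
		return 0;

	bpf_wq_set_callback(&val->w, wq_cb_sleepable, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
```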
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -426,6 +426,7 @@ struct bpf_verifier_state {
 	 * while they are still in use.
 	 */
 	bool used_as_loop_entry;
+	bool in_sleepable;
 	/* first and last insn idx of this verifier state */
 	u32 first_insn_idx;
...
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2711,6 +2711,20 @@ __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
 	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
 }
 
+__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+					 int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+					 unsigned int flags,
+					 void *aux__ign)
+{
+	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign;
+	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
+
+	if (flags)
+		return -EINVAL;
+
+	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
+}
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(generic_btf_ids)
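
A note on the `aux__ign` parameter: the `__ign` suffix tells the verifier to skip type checking for that argument, and the fixup_kfunc_call() hunk further down patches the call site so the argument carries the calling program's `struct bpf_prog_aux` at runtime. The BPF-side wrapper therefore just passes NULL; a hypothetical sketch of what that wrapper plausibly looks like (its exact form lives elsewhere in this series):

```c
/* Hypothetical sketch of the user-facing wrapper: the trailing NULL is
 * ignored by the verifier and replaced by fixup_kfunc_call() with the
 * program's bpf_prog_aux pointer.
 */
static inline int bpf_wq_set_callback(struct bpf_wq *wq,
		int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
		unsigned int flags)
{
	return bpf_wq_set_callback_impl(wq, callback_fn, flags, NULL);
}
```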
@@ -2789,6 +2803,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_size)
 BTF_ID_FLAGS(func, bpf_dynptr_clone)
 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
 BTF_ID_FLAGS(func, bpf_wq_init)
+BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
...
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -501,8 +501,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
 }
 
 static bool is_sync_callback_calling_kfunc(u32 btf_id);
+static bool is_async_callback_calling_kfunc(u32 btf_id);
+static bool is_callback_calling_kfunc(u32 btf_id);
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
+static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
 
 static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
 {
 	return func_id == BPF_FUNC_for_each_map_elem ||
@@ -530,7 +534,8 @@ static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
 
 static bool is_async_callback_calling_insn(struct bpf_insn *insn)
 {
-	return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
+	return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) ||
+	       (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
 }
 
 static bool is_may_goto_insn(struct bpf_insn *insn)
@@ -1429,6 +1434,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 	}
 	dst_state->speculative = src->speculative;
 	dst_state->active_rcu_lock = src->active_rcu_lock;
+	dst_state->in_sleepable = src->in_sleepable;
 	dst_state->curframe = src->curframe;
 	dst_state->active_lock.ptr = src->active_lock.ptr;
 	dst_state->active_lock.id = src->active_lock.id;
@@ -2404,7 +2410,7 @@ static void init_func_state(struct bpf_verifier_env *env,
 /* Similar to push_stack(), but for async callbacks */
 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 						int insn_idx, int prev_insn_idx,
-						int subprog)
+						int subprog, bool is_sleepable)
 {
 	struct bpf_verifier_stack_elem *elem;
 	struct bpf_func_state *frame;
@@ -2431,6 +2437,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 	 * Initialize it similar to do_check_common().
 	 */
 	elem->st.branches = 1;
+	elem->st.in_sleepable = is_sleepable;
 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 	if (!frame)
 		goto err;
@@ -5278,7 +5285,8 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
 static bool in_sleepable(struct bpf_verifier_env *env)
 {
-	return env->prog->sleepable;
+	return env->prog->sleepable ||
+	       (env->cur_state && env->cur_state->in_sleepable);
 }
 
 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
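
The effect of the hunks above: when push_async_cb() is called with is_sleepable set, the new verifier state starts out with in_sleepable = true, so in_sleepable() now returns true while the body of the workqueue callback is being verified even if the program itself is not sleepable. Because copy_verifier_state() propagates the flag, it survives state pushes and pops within the callback.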
@@ -9513,7 +9521,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	 */
 	env->subprog_info[subprog].is_cb = true;
 	if (bpf_pseudo_kfunc_call(insn) &&
-	    !is_sync_callback_calling_kfunc(insn->imm)) {
+	    !is_callback_calling_kfunc(insn->imm)) {
 		verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
 			func_id_name(insn->imm), insn->imm);
 		return -EFAULT;
@@ -9527,10 +9535,11 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	if (is_async_callback_calling_insn(insn)) {
 		struct bpf_verifier_state *async_cb;
 
-		/* there is no real recursion here. timer callbacks are async */
+		/* there is no real recursion here. timer and workqueue callbacks are async */
 		env->subprog_info[subprog].is_async_cb = true;
 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
-					 insn_idx, subprog);
+					 insn_idx, subprog,
+					 is_bpf_wq_set_callback_impl_kfunc(insn->imm));
 		if (!async_cb)
 			return -EFAULT;
 		callee = async_cb->frame[0];
@@ -11017,6 +11026,7 @@ enum special_kfunc_type {
 	KF_bpf_percpu_obj_new_impl,
 	KF_bpf_percpu_obj_drop_impl,
 	KF_bpf_throw,
+	KF_bpf_wq_set_callback_impl,
 	KF_bpf_iter_css_task_new,
 };
 
@@ -11041,6 +11051,7 @@ BTF_ID(func, bpf_dynptr_clone)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+BTF_ID(func, bpf_wq_set_callback_impl)
 #ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
 #endif
@@ -11069,6 +11080,7 @@ BTF_ID(func, bpf_dynptr_clone)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+BTF_ID(func, bpf_wq_set_callback_impl)
 #ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
 #else
@@ -11402,12 +11414,28 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
 }
 
+static bool is_async_callback_calling_kfunc(u32 btf_id)
+{
+	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+}
+
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
 {
 	return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
 	       insn->imm == special_kfunc_list[KF_bpf_throw];
 }
 
+static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
+{
+	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+}
+
+static bool is_callback_calling_kfunc(u32 btf_id)
+{
+	return is_sync_callback_calling_kfunc(btf_id) ||
+	       is_async_callback_calling_kfunc(btf_id);
+}
+
 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
 {
 	return is_bpf_rbtree_api_kfunc(btf_id);
@@ -12219,6 +12247,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		}
 	}
 
+	if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_timer_callback_state);
+		if (err) {
+			verbose(env, "kfunc %s#%d failed callback verification\n",
+				func_name, meta.func_id);
+			return err;
+		}
+	}
+
 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
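
Note that the workqueue callback is verified with set_timer_callback_state(): timer and workqueue callbacks take the same (map pointer, map key, async object) argument triple, so the existing timer helper can be reused here to set up the callee's register state.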
@@ -16968,6 +17006,9 @@ static bool states_equal(struct bpf_verifier_env *env,
 	if (old->active_rcu_lock != cur->active_rcu_lock)
 		return false;
 
+	if (old->in_sleepable != cur->in_sleepable)
+		return false;
+
 	/* for states to be equal callsites have to be the same
 	 * and all frame states need to be equivalent
 	 */
@@ -19639,6 +19680,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
 		*cnt = 1;
+	} else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
+		struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };
+
+		insn_buf[0] = ld_addrs[0];
+		insn_buf[1] = ld_addrs[1];
+		insn_buf[2] = *insn;
+		*cnt = 3;
 	}
 
 	return 0;
 }
...
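
On the fixup hunk above: BPF_LD_IMM64() expands to two instructions, which is why ld_addrs has two elements and *cnt is 3. The rewritten call site loads the 64-bit env->prog->aux pointer into R4, the register carrying the kfunc's fourth argument (aux__ign), and then issues the original call, so the kernel-side kfunc receives the calling program's struct bpf_prog_aux in place of the NULL the program passed.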