Commit dc92febf authored by Alexei Starovoitov

bpf: Don't check for recursion in bpf_wq_work.

__bpf_prog_enter_sleepable_recur does recursion check which is not applicable
to wq callback. The callback function is part of bpf program and bpf prog might
be running on the same cpu. So recursion check would incorrectly prevent
callback from running. The code can call __bpf_prog_enter_sleepable(), but
run_ctx would be fake, hence use explicit rcu_read_lock_trace();
migrate_disable(); to address this problem. Another reason to open code is
__bpf_prog_enter* are not available in !JIT configs.
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202404241719.IIGdpAku-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202404241811.FFV4Bku3-lkp@intel.com/
Fixes: eb48f6cd ("bpf: wq: add bpf_wq_init")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 6e10b635
@@ -1178,9 +1178,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
 static void bpf_wq_work(struct work_struct *work)
 {
 	struct bpf_work *w = container_of(work, struct bpf_work, work);
-	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
 	struct bpf_async_cb *cb = &w->cb;
-	struct bpf_prog *prog = cb->prog;
 	struct bpf_map *map = cb->map;
 	bpf_callback_t callback_fn;
 	void *value = cb->value;
@@ -1190,7 +1188,7 @@ static void bpf_wq_work(struct work_struct *work)
 	BTF_TYPE_EMIT(struct bpf_wq);

 	callback_fn = READ_ONCE(cb->callback_fn);
-	if (!callback_fn || !prog)
+	if (!callback_fn)
 		return;

 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
@@ -1203,19 +1201,13 @@ static void bpf_wq_work(struct work_struct *work)
 		key = value - round_up(map->key_size, 8);
 	}

-	run_ctx.bpf_cookie = 0;
-
-	if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
-		/* recursion detected */
-		__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
-		return;
-	}
+	rcu_read_lock_trace();
+	migrate_disable();

 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
-	/* The verifier checked that return value is zero. */
-	__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
-					&run_ctx);
+	migrate_enable();
+	rcu_read_unlock_trace();
 }

 static void bpf_wq_delete_work(struct work_struct *work)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment