Commit 197827a0 authored by Hou Tao, committed by Martin KaFai Lau

bpf: Use this_cpu_{inc|dec|inc_return} for bpf_task_storage_busy

Now migrate_disable() does not disable preemption, and on some
architectures (e.g. arm64) __this_cpu_{inc|dec|inc_return} are neither
preemption-safe nor IRQ-safe. So on a fully preemptible kernel,
concurrent lookups or updates on the same task local storage and on the
same CPU may leave bpf_task_storage_busy imbalanced, after which
bpf_task_storage_trylock() on that CPU will always fail.
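
As an illustration, here is a minimal userspace analogue of the broken
pattern (not kernel code: the plain `busy' counter and the trylock(),
unlock() and worker() helpers are hypothetical stand-ins for the
per-CPU counter and bpf_task_storage_trylock()). The plain ++/-- is a
load/add/store sequence, so two contexts preempting each other on the
same CPU can lose an update and leave the counter permanently nonzero:

#include <pthread.h>
#include <stdio.h>

static int busy;	/* stands in for bpf_task_storage_busy */

static int trylock(void)
{
	/* ++ compiles to load, add, store; preemption between the load
	 * and the store loses a concurrent update
	 */
	if (++busy != 1) {
		--busy;
		return 0;
	}
	return 1;
}

static void unlock(void)
{
	--busy;
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 10000000; i++)
		if (trylock())
			unlock();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* after a lost update, busy stays nonzero and every subsequent
	 * trylock() fails
	 */
	printf("busy = %d\n", busy);
	return 0;
}

Once busy is stuck at a nonzero value, trylock() never sees 1 again,
which is exactly the permanent bpf_task_storage_trylock() failure
described above.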

Fix it by using this_cpu_{inc|dec|inc_return} when manipulating
bpf_task_storage_busy.
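
For comparison, a sketch of the fixed pattern in the same userspace
analogue (trylock_fixed()/unlock_fixed() are made-up names; the C11
atomics stand in for this_cpu_{inc_return|dec}, which the affected
architectures implement as an atomic or IRQ-safe read-modify-write):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int busy;

static bool trylock_fixed(void)
{
	/* a single indivisible read-modify-write, mirroring
	 * this_cpu_inc_return(): no update can be lost
	 */
	if (atomic_fetch_add(&busy, 1) + 1 != 1) {
		atomic_fetch_sub(&busy, 1);
		return false;
	}
	return true;
}

static void unlock_fixed(void)
{
	atomic_fetch_sub(&busy, 1);
}

Since increments and decrements can no longer tear, the counter always
returns to zero after unlock_fixed(), and a failed trylock only ever
indicates a real in-progress user on the same CPU.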

Fixes: bc235cdb ("bpf: Prevent deadlock from recursive bpf_task_storage_[get|delete]")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20220901061938.3789460-2-houtao@huaweicloud.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -555,11 +555,11 @@ void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
 				struct bpf_local_storage_elem, map_node))) {
 			if (busy_counter) {
 				migrate_disable();
-				__this_cpu_inc(*busy_counter);
+				this_cpu_inc(*busy_counter);
 			}
 			bpf_selem_unlink(selem, false);
 			if (busy_counter) {
-				__this_cpu_dec(*busy_counter);
+				this_cpu_dec(*busy_counter);
 				migrate_enable();
 			}
 			cond_resched_rcu();
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -26,20 +26,20 @@ static DEFINE_PER_CPU(int, bpf_task_storage_busy);
 static void bpf_task_storage_lock(void)
 {
 	migrate_disable();
-	__this_cpu_inc(bpf_task_storage_busy);
+	this_cpu_inc(bpf_task_storage_busy);
 }
 
 static void bpf_task_storage_unlock(void)
 {
-	__this_cpu_dec(bpf_task_storage_busy);
+	this_cpu_dec(bpf_task_storage_busy);
 	migrate_enable();
 }
 
 static bool bpf_task_storage_trylock(void)
 {
 	migrate_disable();
-	if (unlikely(__this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
-		__this_cpu_dec(bpf_task_storage_busy);
+	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
+		this_cpu_dec(bpf_task_storage_busy);
 		migrate_enable();
 		return false;
 	}