Commit 4c9fbff5 authored by Martin KaFai Lau

Merge branch 'Two fixes for cpu-map'

Hou Tao says:

====================

The patchset fixes two reported warnings in cpu-map when running
xdp_redirect_cpu and some RT threads concurrently. Patch #1 fixes
the warning in __cpu_map_ring_cleanup() when kthread is stopped
prematurely. Patch #2 fixes the warning in __xdp_return() when
there are pending skbs in ptr_ring.

Please see individual patches for more details. And comments are always
welcome.

====================
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parents bcc29b7f 7c62b75c
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h> #include <trace/events/xdp.h>
#include <linux/btf_ids.h> #include <linux/btf_ids.h>
...@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry { ...@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
struct rcu_head rcu; struct rcu_head rcu;
struct work_struct kthread_stop_wq; struct work_struct kthread_stop_wq;
struct completion kthread_running;
}; };
struct bpf_cpu_map { struct bpf_cpu_map {
...@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) ...@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
* invoked cpu_map_kthread_stop(). Catch any broken behaviour * invoked cpu_map_kthread_stop(). Catch any broken behaviour
* gracefully and warn once. * gracefully and warn once.
*/ */
struct xdp_frame *xdpf; void *ptr;
while ((xdpf = ptr_ring_consume(ring))) while ((ptr = ptr_ring_consume(ring))) {
if (WARN_ON_ONCE(xdpf)) WARN_ON_ONCE(1);
xdp_return_frame(xdpf); if (unlikely(__ptr_test_bit(0, &ptr))) {
__ptr_clear_bit(0, &ptr);
kfree_skb(ptr);
continue;
}
xdp_return_frame(ptr);
}
} }
static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
...@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) ...@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
static void cpu_map_kthread_stop(struct work_struct *work) static void cpu_map_kthread_stop(struct work_struct *work)
{ {
struct bpf_cpu_map_entry *rcpu; struct bpf_cpu_map_entry *rcpu;
int err;
rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
...@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work) ...@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
rcu_barrier(); rcu_barrier();
/* kthread_stop will wake_up_process and wait for it to complete */ /* kthread_stop will wake_up_process and wait for it to complete */
err = kthread_stop(rcpu->kthread); kthread_stop(rcpu->kthread);
if (err) {
/* kthread_stop may be called before cpu_map_kthread_run
* is executed, so we need to release the memory related
* to rcpu.
*/
put_cpu_map_entry(rcpu);
}
} }
static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
...@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, ...@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
return nframes; return nframes;
} }
static int cpu_map_kthread_run(void *data) static int cpu_map_kthread_run(void *data)
{ {
struct bpf_cpu_map_entry *rcpu = data; struct bpf_cpu_map_entry *rcpu = data;
complete(&rcpu->kthread_running);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
/* When kthread gives stop order, then rcpu have been disconnected /* When kthread gives stop order, then rcpu have been disconnected
...@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, ...@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
goto free_ptr_ring; goto free_ptr_ring;
/* Setup kthread */ /* Setup kthread */
init_completion(&rcpu->kthread_running);
rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
"cpumap/%d/map:%d", cpu, "cpumap/%d/map:%d", cpu,
map->id); map->id);
...@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, ...@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
kthread_bind(rcpu->kthread, cpu); kthread_bind(rcpu->kthread, cpu);
wake_up_process(rcpu->kthread); wake_up_process(rcpu->kthread);
/* Make sure kthread has been running, so kthread_stop() will not
* stop the kthread prematurely and all pending frames or skbs
* will be handled by the kthread before kthread_stop() returns.
*/
wait_for_completion(&rcpu->kthread_running);
return rcpu; return rcpu;
free_prog: free_prog:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment