Commit 12c8d0f4 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Rename few bpf_mem_alloc fields.

Rename:
-       struct rcu_head rcu;
-       struct llist_head free_by_rcu;
-       struct llist_head waiting_for_gp;
-       atomic_t call_rcu_in_progress;
+       struct llist_head free_by_rcu_ttrace;
+       struct llist_head waiting_for_gp_ttrace;
+       struct rcu_head rcu_ttrace;
+       atomic_t call_rcu_ttrace_in_progress;
...
-	static void do_call_rcu(struct bpf_mem_cache *c)
+	static void do_call_rcu_ttrace(struct bpf_mem_cache *c)

to better indicate intended use.

The 'tasks trace' is shortened to 'ttrace' to reduce verbosity.
No functional changes.

Later patches will add free_by_rcu/waiting_for_gp fields to be used with normal RCU.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-2-alexei.starovoitov@gmail.com
parent c21de5fc
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -99,10 +99,11 @@ struct bpf_mem_cache {
 	int low_watermark, high_watermark, batch;
 	int percpu_size;
 
-	struct rcu_head rcu;
-	struct llist_head free_by_rcu;
-	struct llist_head waiting_for_gp;
-	atomic_t call_rcu_in_progress;
+	/* list of objects to be freed after RCU tasks trace GP */
+	struct llist_head free_by_rcu_ttrace;
+	struct llist_head waiting_for_gp_ttrace;
+	struct rcu_head rcu_ttrace;
+	atomic_t call_rcu_ttrace_in_progress;
 };
 
 struct bpf_mem_caches {
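For orientation, here are the tasks-trace related fields of struct bpf_mem_cache as they look after this hunk, condensed into one place. The per-field notes are editorial, derived from the hunks below; the surrounding allocator fields are elided:

    struct bpf_mem_cache {
            /* ... free_llist, refill_work, watermarks, etc. ... */

            /* list of objects to be freed after RCU tasks trace GP */
            struct llist_head free_by_rcu_ttrace;    /* filled by enque_to_free() */
            struct llist_head waiting_for_gp_ttrace; /* filled by do_call_rcu_ttrace(), drained by __free_rcu() */
            struct rcu_head rcu_ttrace;              /* handed to call_rcu_tasks_trace() */
            atomic_t call_rcu_ttrace_in_progress;    /* one tasks trace callback in flight at a time */
    };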
@@ -165,18 +166,18 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	old_memcg = set_active_memcg(memcg);
 	for (i = 0; i < cnt; i++) {
 		/*
-		 * free_by_rcu is only manipulated by irq work refill_work().
+		 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
 		 * IRQ works on the same CPU are called sequentially, so it is
 		 * safe to use __llist_del_first() here. If alloc_bulk() is
 		 * invoked by the initial prefill, there will be no running
 		 * refill_work(), so __llist_del_first() is fine as well.
 		 *
-		 * In most cases, objects on free_by_rcu are from the same CPU.
+		 * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
 		 * If some objects come from other CPUs, it doesn't incur any
 		 * harm because NUMA_NO_NODE means the preference for current
 		 * numa node and it is not a guarantee.
 		 */
-		obj = __llist_del_first(&c->free_by_rcu);
+		obj = __llist_del_first(&c->free_by_rcu_ttrace);
 		if (!obj) {
 			/* Allocate, but don't deplete atomic reserves that typical
 			 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
@@ -232,10 +233,10 @@ static void free_all(struct llist_node *llnode, bool percpu)
 
 static void __free_rcu(struct rcu_head *head)
 {
-	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
+	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);
 
-	free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
-	atomic_set(&c->call_rcu_in_progress, 0);
+	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
+	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
 }
 
 static void __free_rcu_tasks_trace(struct rcu_head *head)
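The container_of() change above is mechanical, but it is the idiom that makes the rename matter: the RCU callback only receives the embedded rcu_head, and container_of() walks back to the enclosing bpf_mem_cache, so the member name must match at both ends. A minimal stand-alone illustration of the idiom; the macro and structs below are simplified stand-ins for this example, not the kernel definitions:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified stand-ins for struct rcu_head and struct bpf_mem_cache. */
    struct rcu_head_stub { void *next; void (*func)(struct rcu_head_stub *); };

    struct cache_stub {
            int percpu_size;
            struct rcu_head_stub rcu_ttrace;        /* embedded callback head */
    };

    static void free_cb(struct rcu_head_stub *head)
    {
            /* Recover the enclosing object from the embedded member,
             * just as __free_rcu() does with rcu_ttrace above.
             */
            struct cache_stub *c = container_of(head, struct cache_stub, rcu_ttrace);
            (void)c;
    }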
@@ -254,32 +255,32 @@ static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 	struct llist_node *llnode = obj;
 
 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
-	 * Nothing races to add to free_by_rcu list.
+	 * Nothing races to add to free_by_rcu_ttrace list.
 	 */
-	__llist_add(llnode, &c->free_by_rcu);
+	__llist_add(llnode, &c->free_by_rcu_ttrace);
 }
 
-static void do_call_rcu(struct bpf_mem_cache *c)
+static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 {
 	struct llist_node *llnode, *t;
 
-	if (atomic_xchg(&c->call_rcu_in_progress, 1))
+	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1))
 		return;
 
-	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
-		/* There is no concurrent __llist_add(waiting_for_gp) access.
+	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
+	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu_ttrace))
+		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
 		 * It doesn't race with llist_del_all either.
-		 * But there could be two concurrent llist_del_all(waiting_for_gp):
+		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
 		 * from __free_rcu() and from drain_mem_cache().
 		 */
-		__llist_add(llnode, &c->waiting_for_gp);
+		__llist_add(llnode, &c->waiting_for_gp_ttrace);
 	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
 	 * If RCU Tasks Trace grace period implies RCU grace period, free
 	 * these elements directly, else use call_rcu() to wait for normal
 	 * progs to finish and finally do free_one() on each element.
 	 */
-	call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
+	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
 }
 
 static void free_bulk(struct bpf_mem_cache *c)
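The atomic_xchg() in do_call_rcu_ttrace() acts as a single-flight guard: only the caller that flips call_rcu_ttrace_in_progress from 0 to 1 moves free_by_rcu_ttrace onto waiting_for_gp_ttrace and queues the tasks-trace callback; everyone else returns and leaves their objects on free_by_rcu_ttrace for the next round, and __free_rcu() clears the flag once the batch has been freed. A small userspace sketch of the same guard with C11 atomics, purely illustrative and not kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int cb_in_progress;

    /* Only the caller that flips 0 -> 1 gets to queue the callback,
     * mirroring atomic_xchg(&c->call_rcu_ttrace_in_progress, 1). */
    static bool try_start_callback(void)
    {
            return atomic_exchange(&cb_in_progress, 1) == 0;
    }

    /* Run by the callback after the batch is freed, mirroring
     * atomic_set(&c->call_rcu_ttrace_in_progress, 0) in __free_rcu(). */
    static void finish_callback(void)
    {
            atomic_store(&cb_in_progress, 0);
    }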
@@ -307,7 +308,7 @@ static void free_bulk(struct bpf_mem_cache *c)
 	/* and drain free_llist_extra */
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
 		enque_to_free(c, llnode);
-	do_call_rcu(c);
+	do_call_rcu_ttrace(c);
 }
 
 static void bpf_mem_refill(struct irq_work *work)
@@ -441,13 +442,13 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
 
 	/* No progs are using this bpf_mem_cache, but htab_map_free() called
 	 * bpf_mem_cache_free() for all remaining elements and they can be in
-	 * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
+	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
 	 *
-	 * Except for waiting_for_gp list, there are no concurrent operations
+	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
 	 * on these lists, so it is safe to use __llist_del_all().
 	 */
-	free_all(__llist_del_all(&c->free_by_rcu), percpu);
-	free_all(llist_del_all(&c->waiting_for_gp), percpu);
+	free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
+	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
 	free_all(__llist_del_all(&c->free_llist), percpu);
 	free_all(__llist_del_all(&c->free_llist_extra), percpu);
 }
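As the comment notes, the drain path can use the non-atomic __llist_del_all() for every list except waiting_for_gp_ttrace, because __free_rcu() may still detach that one concurrently. Conceptually the two helpers differ only in how they detach the list head; a hedged userspace sketch of that difference, using stub types rather than the kernel's llist.h:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *next; };
    struct llist_head_stub { _Atomic(struct node *) first; };

    /* Atomic detach: safe against concurrent users, conceptually what
     * llist_del_all() provides for waiting_for_gp_ttrace. */
    static struct node *del_all_atomic(struct llist_head_stub *h)
    {
            return atomic_exchange(&h->first, NULL);
    }

    /* Plain detach: fine when the caller is the only one touching the
     * list, conceptually what __llist_del_all() provides for the rest. */
    static struct node *del_all_plain(struct llist_head_stub *h)
    {
            struct node *n = atomic_load_explicit(&h->first, memory_order_relaxed);

            atomic_store_explicit(&h->first, NULL, memory_order_relaxed);
            return n;
    }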
@@ -462,7 +463,7 @@ static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
 
 static void free_mem_alloc(struct bpf_mem_alloc *ma)
 {
-	/* waiting_for_gp lists was drained, but __free_rcu might
+	/* waiting_for_gp_ttrace lists was drained, but __free_rcu might
 	 * still execute. Wait for it now before we freeing percpu caches.
 	 *
 	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
@@ -535,7 +536,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 		 */
 		irq_work_sync(&c->refill_work);
 		drain_mem_cache(c);
-		rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
+		rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
 	}
 	/* objcg is the same across cpus */
 	if (c->objcg)
@@ -550,7 +551,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 			c = &cc->cache[i];
 			irq_work_sync(&c->refill_work);
 			drain_mem_cache(c);
-			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
+			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
 		}
 	}
 	if (c->objcg)