Commit 822fb26b authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Add a hint to allocated objects.

To address the OOM issue when one CPU is allocating and another CPU is freeing,
add a target bpf_mem_cache hint to allocated objects, and when the local CPU's
free_llist overflows, free to that bpf_mem_cache. The hint addresses the OOM
while maintaining the same performance for the common case when alloc/free are
done on the same CPU.

Note that do_call_rcu_ttrace() now has to check the 'draining' flag in one more
case, since do_call_rcu_ttrace() is called not only for the current CPU.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-9-alexei.starovoitov@gmail.com
parent d114dde2
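
In a nutshell, the hint works like this: unit_alloc() stamps the object's llist_node area with a pointer to the owning cache, unit_free() reads that stamp back into c->tgt, and free_bulk() later drains overflow to tgt instead of the local cache. Below is a minimal userspace sketch of that idea, not kernel code; struct mem_cache, model_alloc() and model_free() are illustrative stand-ins for struct bpf_mem_cache, unit_alloc() and unit_free().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct bpf_mem_cache. */
struct mem_cache {
	int cpu;
	struct mem_cache *tgt;	/* hint: cache that allocated the last freed object */
};

/* Stand-in for unit_alloc(): stamp the object with its owning cache. */
static void *model_alloc(struct mem_cache *c)
{
	void *obj = malloc(64);			/* stands in for the object's llist_node area */
	if (obj)
		*(struct mem_cache **)obj = c;	/* the hint lives in the first 8 bytes */
	return obj;
}

/* Stand-in for unit_free(): read the (possibly stale) hint into c->tgt. */
static void model_free(struct mem_cache *c, void *obj)
{
	c->tgt = *(struct mem_cache **)obj;	/* may differ from c on a cross-CPU free */
	free(obj);
}

int main(void)
{
	struct mem_cache cpu0 = { .cpu = 0, .tgt = &cpu0 };
	struct mem_cache cpu1 = { .cpu = 1, .tgt = &cpu1 };

	void *obj = model_alloc(&cpu0);		/* allocated on "CPU 0" */
	model_free(&cpu1, obj);			/* freed on "CPU 1" */

	/* free_bulk() on CPU 1 would now route overflow to cpu1.tgt, i.e. CPU 0's cache */
	printf("object came from CPU %d's cache\n", cpu1.tgt->cpu);
	return 0;
}
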
@@ -99,6 +99,7 @@ struct bpf_mem_cache {
 	int low_watermark, high_watermark, batch;
 	int percpu_size;
 	bool draining;
+	struct bpf_mem_cache *tgt;
 
 	/* list of objects to be freed after RCU tasks trace GP */
 	struct llist_head free_by_rcu_ttrace;
@@ -199,18 +200,11 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 
 	for (i = 0; i < cnt; i++) {
 		/*
-		 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
-		 * IRQ works on the same CPU are called sequentially, so it is
-		 * safe to use __llist_del_first() here. If alloc_bulk() is
-		 * invoked by the initial prefill, there will be no running
-		 * refill_work(), so __llist_del_first() is fine as well.
-		 *
-		 * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
-		 * If some objects come from other CPUs, it doesn't incur any
-		 * harm because NUMA_NO_NODE means the preference for current
-		 * numa node and it is not a guarantee.
+		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
+		 * done only by one CPU == current CPU. Other CPUs might
+		 * llist_add() and llist_del_all() in parallel.
 		 */
-		obj = __llist_del_first(&c->free_by_rcu_ttrace);
+		obj = llist_del_first(&c->free_by_rcu_ttrace);
 		if (!obj)
 			break;
 		add_obj_to_free_list(c, obj);
@@ -284,18 +278,23 @@ static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
 	 * Nothing races to add to free_by_rcu_ttrace list.
 	 */
-	__llist_add(llnode, &c->free_by_rcu_ttrace);
+	llist_add(llnode, &c->free_by_rcu_ttrace);
 }
 
 static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 {
 	struct llist_node *llnode, *t;
 
-	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1))
+	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
+		if (unlikely(READ_ONCE(c->draining))) {
+			llnode = llist_del_all(&c->free_by_rcu_ttrace);
+			free_all(llnode, !!c->percpu_size);
+		}
 		return;
+	}
 
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu_ttrace))
+	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
 		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
 		 * It doesn't race with llist_del_all either.
 		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
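
The 'draining' note in the commit message refers to the branch added in the hunk above: since free_bulk() may now call do_call_rcu_ttrace() on a remote tgt cache, the loser of the atomic_xchg() race cannot simply return while that cache is being torn down, or the objects it queued on free_by_rcu_ttrace would be leaked. A rough userspace model of just that control flow follows; all names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the flush-vs-drain race; not kernel code. */
struct model_cache {
	atomic_int flush_in_progress;	/* models call_rcu_ttrace_in_progress */
	bool draining;			/* models c->draining */
	int pending;			/* models objects queued on free_by_rcu_ttrace */
};

static void model_flush(struct model_cache *c)
{
	if (atomic_exchange(&c->flush_in_progress, 1)) {
		/* Another flush owns the flag. If the cache is being destroyed,
		 * release the queued objects now instead of leaking them.
		 */
		if (c->draining) {
			printf("draining: freeing %d queued objects immediately\n", c->pending);
			c->pending = 0;
		}
		return;
	}
	printf("normal path: %d objects freed after a grace period\n", c->pending);
	c->pending = 0;
	atomic_store(&c->flush_in_progress, 0);
}

int main(void)
{
	struct model_cache c = { .pending = 3, .draining = false };

	model_flush(&c);			/* normal path */
	c.pending = 2;
	atomic_store(&c.flush_in_progress, 1);	/* pretend a remote flush is in flight */
	c.draining = true;
	model_flush(&c);			/* takes the new draining branch */
	return 0;
}
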
@@ -318,10 +317,13 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 
 static void free_bulk(struct bpf_mem_cache *c)
 {
+	struct bpf_mem_cache *tgt = c->tgt;
 	struct llist_node *llnode, *t;
 	unsigned long flags;
 	int cnt;
 
+	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
+
 	do {
 		inc_active(c, &flags);
 		llnode = __llist_del_first(&c->free_llist);
@@ -331,13 +333,13 @@ static void free_bulk(struct bpf_mem_cache *c)
 			cnt = 0;
 		dec_active(c, flags);
 		if (llnode)
-			enque_to_free(c, llnode);
+			enque_to_free(tgt, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 
 	/* and drain free_llist_extra */
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
-		enque_to_free(c, llnode);
-	do_call_rcu_ttrace(c);
+		enque_to_free(tgt, llnode);
+	do_call_rcu_ttrace(tgt);
 }
 
 static void bpf_mem_refill(struct irq_work *work)
@@ -436,6 +438,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 		c->unit_size = unit_size;
 		c->objcg = objcg;
 		c->percpu_size = percpu_size;
+		c->tgt = c;
 		prefill_mem_cache(c, cpu);
 	}
 	ma->cache = pc;
@@ -458,6 +461,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c = &cc->cache[i];
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
+			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
 	}
@@ -476,7 +480,7 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
 	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
 	 * on these lists, so it is safe to use __llist_del_all().
 	 */
-	free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
+	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
 	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
 	free_all(__llist_del_all(&c->free_llist), percpu);
 	free_all(__llist_del_all(&c->free_llist_extra), percpu);
@@ -601,8 +605,10 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		llnode = __llist_del_first(&c->free_llist);
-		if (llnode)
+		if (llnode) {
 			cnt = --c->free_cnt;
+			*(struct bpf_mem_cache **)llnode = c;
+		}
 	}
 	local_dec(&c->active);
 	local_irq_restore(flags);
@@ -626,6 +632,12 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 
 	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
 
+	/*
+	 * Remember bpf_mem_cache that allocated this object.
+	 * The hint is not accurate.
+	 */
+	c->tgt = *(struct bpf_mem_cache **)llnode;
+
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		__llist_add(llnode, &c->free_llist);