Commit ace2bee8 authored by Yafang Shao, committed by Alexei Starovoitov

bpf: Make non-preallocated allocation low priority

GFP_ATOMIC doesn't cooperate well with memcg pressure so far, especially
when we allocate too much GFP_ATOMIC memory. For example, when we set a
memcg limit to bound non-preallocated bpf memory, GFP_ATOMIC can easily
break the limit by force charging. So it is very dangerous to use
GFP_ATOMIC in the non-preallocated case. One way to make it safe is to
remove __GFP_HIGH from GFP_ATOMIC, IOW, to use (__GFP_ATOMIC |
__GFP_KSWAPD_RECLAIM) instead; then the allocation will be limited if we
allocate too much memory. There's a plan to completely remove
__GFP_ATOMIC on the mm side[1], so let's use GFP_NOWAIT instead.
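
For reference, these flags are composed as follows (a paraphrase of the
include/linux/gfp.h definitions of that era; check the tree in question
for the authoritative values):

	/* High priority: may dip into reserves, and its charge can be
	 * forced past a memcg limit.
	 */
	#define GFP_ATOMIC	(__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)

	/* Still non-sleeping (only kicks kswapd), but without __GFP_HIGH,
	 * so the allocation stays subject to the memcg limit.
	 */
	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)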

We introduced BPF_F_NO_PREALLOC because full map pre-allocation is too
memory expensive for some cases. That means removing __GFP_HIGH doesn't
break the rule of BPF_F_NO_PREALLOC; it shares the same goal of avoiding
issues caused by too much memory. So let's remove it.
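
To recall what the non-preallocated case looks like from the user side,
here is a minimal, hypothetical map declaration in libbpf's BTF-style
syntax (the map name and sizes are invented for illustration):

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 8192);
		__type(key, __u32);
		__type(value, __u64);
		__uint(map_flags, BPF_F_NO_PREALLOC);	/* no up-front element pool */
	} example_map SEC(".maps");

Elements of such a map are allocated at update time, which is exactly the
run-time allocation path this patch switches to GFP_NOWAIT.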

This fix also applies to other run-time allocations, for example the
allocations in lpm trie, local storage and devmap. So let's fix it
consistently across the bpf code.

It also fixes a typo in the comment.

[1]. https://lore.kernel.org/linux-mm/163712397076.13692.4727608274002939094@noble.neil.brown.name/

Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: NeilBrown <neilb@suse.de>
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Link: https://lore.kernel.org/r/20220709154457.57379-2-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 1d5f82d9
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -845,7 +845,7 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
 	struct bpf_dtab_netdev *dev;
 
 	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
-				   GFP_ATOMIC | __GFP_NOWARN,
+				   GFP_NOWAIT | __GFP_NOWARN,
 				   dtab->map.numa_node);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -61,7 +61,7 @@
  *
  * As regular device interrupt handlers and soft interrupts are forced into
  * thread context, the existing code which does
- *   spin_lock*(); alloc(GPF_ATOMIC); spin_unlock*();
+ *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
  * just works.
  *
  * In theory the BPF locks could be converted to regular spinlocks as well,
@@ -978,7 +978,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			goto dec_count;
 		}
 		l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
-					     GFP_ATOMIC | __GFP_NOWARN,
+					     GFP_NOWAIT | __GFP_NOWARN,
 					     htab->map.numa_node);
 		if (!l_new) {
 			l_new = ERR_PTR(-ENOMEM);
@@ -996,7 +996,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		} else {
 			/* alloc_percpu zero-fills */
 			pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
-						    GFP_ATOMIC | __GFP_NOWARN);
+						    GFP_NOWAIT | __GFP_NOWARN);
 			if (!pptr) {
 				kfree(l_new);
 				l_new = ERR_PTR(-ENOMEM);
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -165,7 +165,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
 	}
 
 	new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
-				   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
+				   __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN,
 				   map->numa_node);
 	if (!new)
 		return -ENOMEM;
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -285,7 +285,7 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
 	if (value)
 		size += trie->map.value_size;
 
-	node = bpf_map_kmalloc_node(&trie->map, size, GFP_ATOMIC | __GFP_NOWARN,
+	node = bpf_map_kmalloc_node(&trie->map, size, GFP_NOWAIT | __GFP_NOWARN,
 				    trie->map.numa_node);
 	if (!node)
 		return NULL;