Commit ee53cbfb authored by Yafang Shao, committed by Alexei Starovoitov

bpf: allow to disable bpf map memory accounting

We can simply set root memcg as the map's memcg to disable bpf memory
accounting. bpf_map_area_alloc is a little special as it gets the memcg
from current rather than from the map, so we need to disable GFP_ACCOUNT
specifically for it.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Link: https://lore.kernel.org/r/20230210154734.4416-4-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent ddef81b5
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/btf.h> #include <linux/btf.h>
#include <linux/rcupdate_trace.h> #include <linux/rcupdate_trace.h>
#include <linux/static_call.h> #include <linux/static_call.h>
#include <linux/memcontrol.h>
struct bpf_verifier_env; struct bpf_verifier_env;
struct bpf_verifier_log; struct bpf_verifier_log;
...@@ -2933,4 +2934,11 @@ static inline bool type_is_alloc(u32 type) ...@@ -2933,4 +2934,11 @@ static inline bool type_is_alloc(u32 type)
return type & MEM_ALLOC; return type & MEM_ALLOC;
} }
/*
 * bpf_memcg_flags - add __GFP_ACCOUNT to @flags when bpf memcg
 * accounting is enabled.
 *
 * Allocation sites use this instead of hard-coding __GFP_ACCOUNT so
 * that memcg-based accounting of bpf map memory can be switched off
 * (the memcg_bpf_enabled() static key) without touching each caller.
 *
 * Returns @flags, possibly OR-ed with __GFP_ACCOUNT.
 */
static inline gfp_t bpf_memcg_flags(gfp_t flags)
{
	return memcg_bpf_enabled() ? flags | __GFP_ACCOUNT : flags;
}
#endif /* _LINUX_BPF_H */ #endif /* _LINUX_BPF_H */
...@@ -395,7 +395,8 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu) ...@@ -395,7 +395,8 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
unit_size = size; unit_size = size;
#ifdef CONFIG_MEMCG_KMEM #ifdef CONFIG_MEMCG_KMEM
objcg = get_obj_cgroup_from_current(); if (memcg_bpf_enabled())
objcg = get_obj_cgroup_from_current();
#endif #endif
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
c = per_cpu_ptr(pc, cpu); c = per_cpu_ptr(pc, cpu);
......
...@@ -309,7 +309,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) ...@@ -309,7 +309,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
* __GFP_RETRY_MAYFAIL to avoid such situations. * __GFP_RETRY_MAYFAIL to avoid such situations.
*/ */
const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT; gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
unsigned int flags = 0; unsigned int flags = 0;
unsigned long align = 1; unsigned long align = 1;
void *area; void *area;
...@@ -418,7 +418,8 @@ static void bpf_map_save_memcg(struct bpf_map *map) ...@@ -418,7 +418,8 @@ static void bpf_map_save_memcg(struct bpf_map *map)
* So we have to check map->objcg for being NULL each time it's * So we have to check map->objcg for being NULL each time it's
* being used. * being used.
*/ */
map->objcg = get_obj_cgroup_from_current(); if (memcg_bpf_enabled())
map->objcg = get_obj_cgroup_from_current();
} }
static void bpf_map_release_memcg(struct bpf_map *map) static void bpf_map_release_memcg(struct bpf_map *map)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment