Commit 196e8ca7 authored by Daniel Borkmann

bpf: Switch bpf_map_{area_alloc,area_mmapable_alloc}() to u64 size

Given we recently extended the original bpf_map_area_alloc() helper in
commit fc970227 ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY"),
we need to apply the same logic as in ff1c08e1 ("bpf: Change size
to u64 for bpf_map_{area_alloc, charge_init}()"). To avoid conflicts,
extend it for bpf-next.
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 91e6015b
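
For background (not part of the commit message): BPF map sizes are derived from 32-bit attributes such as max_entries and value_size, so the total byte count only fits reliably in a 64-bit type, and on 32-bit kernels a size_t parameter would silently truncate it. Below is a minimal, hypothetical userspace C sketch of the truncation the u64 signatures avoid; the numbers are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical map attributes: each factor fits in u32,
	 * but the product needs 64 bits.
	 */
	uint32_t max_entries = 1U << 20;	/* 1M elements  */
	uint32_t value_size  = 1U << 13;	/* 8 KiB each   */

	uint64_t total  = (uint64_t)max_entries * value_size;	/* 8 GiB */
	uint32_t as_u32 = (uint32_t)total;	/* what a 32-bit size_t would keep: 0 */

	printf("64-bit total:        %llu bytes\n", (unsigned long long)total);
	printf("truncated to 32 bit: %u bytes\n", as_u32);
	return 0;
}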
@@ -794,12 +794,12 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
 			 struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
-void *bpf_map_area_mmapable_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
...
@@ -128,7 +128,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
+static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
 {
 	/* We really just want to fail instead of triggering OOM killer
 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -143,6 +143,9 @@ static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 	void *area;
 
+	if (size >= SIZE_MAX)
+		return NULL;
+
 	/* kmalloc()'ed memory can't be mmap()'ed */
 	if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
@@ -160,12 +163,12 @@ static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
 			flags, __builtin_return_address(0));
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
 	return __bpf_map_area_alloc(size, numa_node, false);
 }
 
-void *bpf_map_area_mmapable_alloc(size_t size, int numa_node)
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
 {
 	return __bpf_map_area_alloc(size, numa_node, true);
 }
@@ -214,7 +217,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 	atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 	struct user_struct *user;
...
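
The net effect of the hunk above, sketched outside the kernel under the assumption of a 32-bit size_t: once the parameter is u64, the explicit size >= SIZE_MAX check turns what would otherwise be a silent truncation into a clean allocation failure. The helper name below is made up for illustration; it is not the kernel implementation.

#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for the widened allocator: reject anything the
 * native size_t cannot represent, then fall through to an ordinary
 * allocation.
 */
static void *area_alloc_sketch(uint64_t size)
{
	if (size >= SIZE_MAX)		/* mirrors the added guard */
		return NULL;
	return calloc(1, (size_t)size);	/* safe to narrow after the check */
}

int main(void)
{
	/* 6 GiB request: NULL on 32-bit builds (guard trips), an ordinary
	 * may-fail allocation on 64-bit builds.
	 */
	void *area = area_alloc_sketch(6ULL << 30);

	free(area);
	return 0;
}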