Commit 134fede4 authored by Martin KaFai Lau's avatar Martin KaFai Lau Committed by Daniel Borkmann

bpf: Relax max_entries check for most of the inner map types

Most of the maps do not use max_entries during verification time.
Thus, those map_meta_equal() do not need to enforce max_entries
when it is inserted as an inner map during runtime.  The max_entries
check is removed from the default implementation bpf_map_meta_equal().

The prog_array_map and xsk_map are exceptions.  Their map_gen_lookup
uses max_entries to generate inline lookup code.  Thus, they each
implement their own map_meta_equal() to enforce the max_entries check.
Since there are only two cases now, the max_entries check
is not refactored and stays in its own .c file.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200828011813.1970516-1-kafai@fb.com
parent f4d05259
...@@ -487,6 +487,13 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) ...@@ -487,6 +487,13 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
vma->vm_pgoff + pgoff); vma->vm_pgoff + pgoff);
} }
/* Inner-map compatibility check for arraymaps.
 *
 * array_map_ops implements map_gen_lookup, which inlines max_entries
 * into the generated lookup code, so an inner map inserted at runtime
 * must also match max_entries on top of the generic checks done by
 * bpf_map_meta_equal().
 */
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	/* Guard on the array-specific constraint first, then fall back
	 * to the common type/key/value/flags comparison.
	 */
	if (meta0->max_entries != meta1->max_entries)
		return false;

	return bpf_map_meta_equal(meta0, meta1);
}
struct bpf_iter_seq_array_map_info { struct bpf_iter_seq_array_map_info {
struct bpf_map *map; struct bpf_map *map;
void *percpu_value_buf; void *percpu_value_buf;
...@@ -625,7 +632,7 @@ static const struct bpf_iter_seq_info iter_seq_info = { ...@@ -625,7 +632,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
static int array_map_btf_id; static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = { const struct bpf_map_ops array_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = array_map_meta_equal,
.map_alloc_check = array_map_alloc_check, .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc, .map_alloc = array_map_alloc,
.map_free = array_map_free, .map_free = array_map_free,
......
...@@ -75,8 +75,7 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0, ...@@ -75,8 +75,7 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
return meta0->map_type == meta1->map_type && return meta0->map_type == meta1->map_type &&
meta0->key_size == meta1->key_size && meta0->key_size == meta1->key_size &&
meta0->value_size == meta1->value_size && meta0->value_size == meta1->value_size &&
meta0->map_flags == meta1->map_flags && meta0->map_flags == meta1->map_flags;
meta0->max_entries == meta1->max_entries;
} }
void *bpf_map_fd_get_ptr(struct bpf_map *map, void *bpf_map_fd_get_ptr(struct bpf_map *map,
......
...@@ -254,9 +254,16 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, ...@@ -254,9 +254,16 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
spin_unlock_bh(&map->lock); spin_unlock_bh(&map->lock);
} }
/* Inner-map compatibility check for xskmaps.
 *
 * Like prog_array, the xskmap's map_gen_lookup bakes max_entries into
 * the inlined lookup, so a runtime-inserted inner map must match
 * max_entries in addition to the generic bpf_map_meta_equal() checks.
 */
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	/* Reject early on the xskmap-specific max_entries constraint. */
	if (meta0->max_entries != meta1->max_entries)
		return false;

	return bpf_map_meta_equal(meta0, meta1);
}
static int xsk_map_btf_id; static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = { const struct bpf_map_ops xsk_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = xsk_map_meta_equal,
.map_alloc = xsk_map_alloc, .map_alloc = xsk_map_alloc,
.map_free = xsk_map_free, .map_free = xsk_map_free,
.map_get_next_key = xsk_map_get_next_key, .map_get_next_key = xsk_map_get_next_key,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment