Commit a2c83fff authored by Daniel Borkmann, committed by David S. Miller

ebpf: constify various function pointer structs

We can move bpf_map_ops, bpf_verifier_ops and other such structs into the
read-only section, and bpf_map_type_list and bpf_prog_type_list into
read-mostly data.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f91fe17e
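
As a rough illustration of what the constification buys (a minimal sketch, not code from the kernel tree; the demo_* names below are made up for the example): declaring an ops table as static const lets the compiler place it in the read-only data section, so its function pointers cannot be overwritten at runtime, while rarely-written registration nodes such as the type lists can additionally be marked __read_mostly so they are grouped away from frequently written data.

/* Hypothetical sketch, plain C, not kernel code: a 'const' function-pointer
 * table is emitted into .rodata, so the pointers cannot be patched at runtime.
 */
struct demo_ops {
        int  (*alloc)(void);
        void (*free)(void);
};

static int  demo_alloc(void) { return 0; }
static void demo_free(void)  { }

/* lands in the read-only data section because of 'const'; in kernel code a
 * write-once registration node would instead carry __read_mostly
 */
static const struct demo_ops demo_ops_table = {
        .alloc = demo_alloc,
        .free  = demo_free,
};

Assigning through the table (e.g. demo_ops_table.alloc = NULL;) is rejected at compile time, and in the kernel the read-only data section can also be write-protected at runtime, which is the hardening benefit this commit is after.
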
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -32,13 +32,13 @@ struct bpf_map {
 	u32 key_size;
 	u32 value_size;
 	u32 max_entries;
-	struct bpf_map_ops *ops;
+	const struct bpf_map_ops *ops;
 	struct work_struct work;
 };
 
 struct bpf_map_type_list {
 	struct list_head list_node;
-	struct bpf_map_ops *ops;
+	const struct bpf_map_ops *ops;
 	enum bpf_map_type type;
 };
 
@@ -109,7 +109,7 @@ struct bpf_verifier_ops {
 
 struct bpf_prog_type_list {
 	struct list_head list_node;
-	struct bpf_verifier_ops *ops;
+	const struct bpf_verifier_ops *ops;
 	enum bpf_prog_type type;
 };
 
@@ -121,7 +121,7 @@ struct bpf_prog_aux {
 	atomic_t refcnt;
 	bool is_gpl_compatible;
 	enum bpf_prog_type prog_type;
-	struct bpf_verifier_ops *ops;
+	const struct bpf_verifier_ops *ops;
 	struct bpf_map **used_maps;
 	u32 used_map_cnt;
 	struct bpf_prog *prog;
@@ -138,8 +138,8 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
 int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
 
 /* verifier prototypes for helper functions called from eBPF programs */
-extern struct bpf_func_proto bpf_map_lookup_elem_proto;
-extern struct bpf_func_proto bpf_map_update_elem_proto;
-extern struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern const struct bpf_func_proto bpf_map_update_elem_proto;
+extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
 #endif /* _LINUX_BPF_H */
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -134,7 +134,7 @@ static void array_map_free(struct bpf_map *map)
 	kvfree(array);
 }
 
-static struct bpf_map_ops array_ops = {
+static const struct bpf_map_ops array_ops = {
 	.map_alloc = array_map_alloc,
 	.map_free = array_map_free,
 	.map_get_next_key = array_map_get_next_key,
@@ -143,14 +143,14 @@ static struct bpf_map_ops array_ops = {
 	.map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map_type_list tl = {
+static struct bpf_map_type_list array_type __read_mostly = {
 	.ops = &array_ops,
 	.type = BPF_MAP_TYPE_ARRAY,
 };
 
 static int __init register_array_map(void)
 {
-	bpf_register_map_type(&tl);
+	bpf_register_map_type(&array_type);
 	return 0;
 }
 late_initcall(register_array_map);
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -345,7 +345,7 @@ static void htab_map_free(struct bpf_map *map)
 	kfree(htab);
 }
 
-static struct bpf_map_ops htab_ops = {
+static const struct bpf_map_ops htab_ops = {
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -354,14 +354,14 @@ static struct bpf_map_ops htab_ops = {
 	.map_delete_elem = htab_map_delete_elem,
 };
 
-static struct bpf_map_type_list tl = {
+static struct bpf_map_type_list htab_type __read_mostly = {
 	.ops = &htab_ops,
 	.type = BPF_MAP_TYPE_HASH,
 };
 
 static int __init register_htab_map(void)
 {
-	bpf_register_map_type(&tl);
+	bpf_register_map_type(&htab_type);
 	return 0;
 }
 late_initcall(register_htab_map);
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -41,7 +41,7 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return (unsigned long) value;
 }
 
-struct bpf_func_proto bpf_map_lookup_elem_proto = {
+const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 	.func = bpf_map_lookup_elem,
 	.gpl_only = false,
 	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
@@ -60,7 +60,7 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return map->ops->map_update_elem(map, key, value, r4);
 }
 
-struct bpf_func_proto bpf_map_update_elem_proto = {
+const struct bpf_func_proto bpf_map_update_elem_proto = {
 	.func = bpf_map_update_elem,
 	.gpl_only = false,
 	.ret_type = RET_INTEGER,
@@ -80,7 +80,7 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return map->ops->map_delete_elem(map, key);
 }
 
-struct bpf_func_proto bpf_map_delete_elem_proto = {
+const struct bpf_func_proto bpf_map_delete_elem_proto = {
 	.func = bpf_map_delete_elem,
 	.gpl_only = false,
 	.ret_type = RET_INTEGER,
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1159,19 +1159,19 @@ static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type
 	return false;
 }
 
-static struct bpf_verifier_ops sock_filter_ops = {
+static const struct bpf_verifier_ops sock_filter_ops = {
 	.get_func_proto = sock_filter_func_proto,
 	.is_valid_access = sock_filter_is_valid_access,
 };
 
-static struct bpf_prog_type_list tl = {
+static struct bpf_prog_type_list sock_filter_type __read_mostly = {
 	.ops = &sock_filter_ops,
 	.type = BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
 static int __init register_sock_filter_ops(void)
 {
-	bpf_register_prog_type(&tl);
+	bpf_register_prog_type(&sock_filter_type);
 	return 0;
 }
 late_initcall(register_sock_filter_ops);