Commit 60911970 authored by Andrii Nakryiko

Merge branch 'Subskeleton support for BPF libraries'

Delyan Kratunov says:

====================

In the quest for ever more modularity, a new need has arisen - the ability to
access data associated with a BPF library from a corresponding userspace library.
The catch is that we don't want the userspace library to know about the structure of the
final BPF object that the BPF library is linked into.

In pursuit of this modularity, this patch series introduces *subskeletons.*
Subskeletons are similar in use and design to skeletons with a couple of differences:

1. The generated storage types do not rely on contiguous storage for the library's
variables because they may be interspersed randomly throughout the final BPF object's sections.

2. Subskeletons do not own objects and instead require a loaded bpf_object* to
be passed at runtime in order to be initialized. By extension, symbols are resolved at
runtime by parsing the final object's BTF.

3. Subskeletons allow access to all global variables, programs, and custom maps. They also expose
the internal maps *of the final object*. This allows bpf_var_skeleton objects to contain a bpf_map**
instead of a section name.
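To make the intended flow concrete, here is a condensed userspace sketch mirroring the
selftest added later in this series (names come from the test objects below; error
handling is trimmed):

#include <bpf/libbpf.h>
#include "test_subskeleton.skel.h"        /* skeleton of the final object */
#include "test_subskeleton_lib.subskel.h" /* subskeleton of the library */

static int run_example(void)
{
	struct test_subskeleton *skel;
	struct test_subskeleton_lib *lib;
	int err = -1;

	skel = test_subskeleton__open();
	if (!skel)
		return -1;

	/* The library's userspace half only needs the final bpf_object; it
	 * does not know how its variables were laid out in that object.
	 */
	lib = test_subskeleton_lib__open(skel->obj);
	if (!lib)
		goto out;

	*lib->rodata.var1 = 1;	/* initialize library globals before load */
	*lib->data.var2 = 2;

	err = test_subskeleton__load(skel);

	/* Destroying the subskeleton frees only its bookkeeping; programs and
	 * maps remain owned by the final object's skeleton.
	 */
	test_subskeleton_lib__destroy(lib);
out:
	test_subskeleton__destroy(skel);
	return err;
}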

Changes since v3:
 - Re-add key/value type lookup for legacy user maps (fixing btf test)
 - Minor cleanups (missed sanitize_identifier call, error messages, formatting)

Changes since v2:
 - Reuse SEC_NAME strict mode flag
 - Init bpf_map->btf_value_type_id on open for internal maps *and* user BTF maps
 - Test custom section names (.data.foo) and overlapping kconfig externs between the final object and the library
 - Minor review comments in gen.c & libbpf.c

Changes since v1:
 - Introduced new strict mode knob for single-routine-in-.text compatibility behavior, which
   disproportionately affects library objects. bpftool works in 1.0 mode so subskeleton generation
   doesn't have to worry about this now.
 - Made bpf_map_btf_value_type_id available earlier and used it wherever applicable.
 - Refactoring in bpftool gen.c per review comments.
 - Subskels now use typeof() for array and func proto globals to avoid the need for runtime split btf.
 - Expanded the subskeleton test to include arrays, custom maps, extern maps, weak symbols, and kconfigs.
 - selftests/bpf/Makefile now generates a subskel.h for every skel.h it would make.

For reference, here is a shortened subskeleton header:

#ifndef __TEST_SUBSKELETON_LIB_SUBSKEL_H__
#define __TEST_SUBSKELETON_LIB_SUBSKEL_H__

struct test_subskeleton_lib {
	struct bpf_object *obj;
	struct bpf_object_subskeleton *subskel;
	struct {
		struct bpf_map *map2;
		struct bpf_map *map1;
		struct bpf_map *data;
		struct bpf_map *rodata;
		struct bpf_map *bss;
		struct bpf_map *kconfig;
	} maps;
	struct {
		struct bpf_program *lib_perf_handler;
	} progs;
	struct test_subskeleton_lib__data {
		int *var6;
		int *var2;
		int *var5;
	} data;
	struct test_subskeleton_lib__rodata {
		int *var1;
	} rodata;
	struct test_subskeleton_lib__bss {
		struct {
			int var3_1;
			__s64 var3_2;
		} *var3;
		int *libout1;
		typeof(int[4]) *var4;
		typeof(int (*)()) *fn_ptr;
	} bss;
	struct test_subskeleton_lib__kconfig {
		_Bool *CONFIG_BPF_SYSCALL;
	} kconfig;
};

static inline struct test_subskeleton_lib *
test_subskeleton_lib__open(const struct bpf_object *src)
{
	struct test_subskeleton_lib *obj;
	struct bpf_object_subskeleton *s;
	int err;

	...
	s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));
	...

	s->var_cnt = 9;
	...

	s->vars[0].name = "var6";
	s->vars[0].map = &obj->maps.data;
	s->vars[0].addr = (void**) &obj->data.var6;
	...

	/* maps */
	...

	/* programs */
	s->prog_cnt = 1;
	...

	err = bpf_object__open_subskeleton(s);
	...
	return obj;
}
#endif /* __TEST_SUBSKELETON_LIB_SUBSKEL_H__ */
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 5a5c11ee 3cccbaa0
......@@ -25,6 +25,7 @@ GEN COMMANDS
| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
| **bpftool** **gen help**
......@@ -150,6 +151,30 @@ DESCRIPTION
(non-read-only) data from userspace, with same simplicity
as for BPF side.
**bpftool gen subskeleton** *FILE*
Generate BPF subskeleton C header file for a given *FILE*.
Subskeletons are similar to skeletons, except they do not own
the corresponding maps, programs, or global variables. They
require that the object file used to generate them is already
loaded into a *bpf_object* by some other means.
This functionality is useful when a library is included into a
larger BPF program. A subskeleton for the library would have
access to all objects and globals defined in it, without
having to know about the larger program.
Consequently, there are only two functions defined
for subskeletons:
- **example__open(bpf_object\*)**
Instantiates a subskeleton from an already opened (but not
necessarily loaded) **bpf_object**.
- **example__destroy()**
Frees the storage for the subskeleton but *does not* unload
any BPF programs or maps.
**bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
Generate a minimum BTF file as *OUTPUT*, derived from a given
*INPUT* BTF file, containing all needed BTF types so one, or
......
......@@ -1003,13 +1003,25 @@ _bpftool()
;;
esac
;;
subskeleton)
case $prev in
$command)
_filedir
return 0
;;
*)
_bpftool_once_attr 'name'
return 0
;;
esac
;;
min_core_btf)
_filedir
return 0
;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'object skeleton help min_core_btf' -- "$cur" ) )
COMPREPLY=( $( compgen -W 'object skeleton subskeleton help min_core_btf' -- "$cur" ) )
;;
esac
;;
......
......@@ -1517,6 +1517,9 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
return strdup(map_name);
}
static int
bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
const char *real_name, int sec_idx, void *data, size_t data_sz)
......@@ -1564,6 +1567,9 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
return err;
}
/* failures are fine because of maps like .rodata.str1.1 */
(void) bpf_map_find_btf_info(obj, map);
if (data)
memcpy(map->mmaped, data, data_sz);
......@@ -2046,6 +2052,9 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
}
memcpy(&map->def, def, sizeof(struct bpf_map_def));
}
/* btf info may not exist but fill it in if it does exist */
(void) bpf_map_find_btf_info(obj, map);
}
return 0;
}
......@@ -2534,6 +2543,10 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
fill_map_from_def(map->inner_map, &inner_def);
}
err = bpf_map_find_btf_info(obj, map);
if (err)
return err;
return 0;
}
......@@ -3832,7 +3845,14 @@ static bool prog_is_subprog(const struct bpf_object *obj,
* .text programs are subprograms (even if they are not called from
* other programs), because libbpf never explicitly supported mixing
* SEC()-designated BPF programs and .text entry-point BPF programs.
*
* In libbpf 1.0 strict mode, we always consider .text
* programs to be subprograms.
*/
if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
return prog->sec_idx == obj->efile.text_shndx;
return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}
......@@ -4866,7 +4886,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map__is_struct_ops(map))
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
if (obj->btf && btf__fd(obj->btf) >= 0) {
create_attr.btf_fd = btf__fd(obj->btf);
create_attr.btf_key_type_id = map->btf_key_type_id;
create_attr.btf_value_type_id = map->btf_value_type_id;
......@@ -11969,6 +11989,49 @@ int libbpf_num_possible_cpus(void)
return tmp_cpus;
}
static int populate_skeleton_maps(const struct bpf_object *obj,
struct bpf_map_skeleton *maps,
size_t map_cnt)
{
int i;
for (i = 0; i < map_cnt; i++) {
struct bpf_map **map = maps[i].map;
const char *name = maps[i].name;
void **mmaped = maps[i].mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
pr_warn("failed to find skeleton map '%s'\n", name);
return -ESRCH;
}
/* externs shouldn't be pre-setup from user code */
if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
*mmaped = (*map)->mmaped;
}
return 0;
}
static int populate_skeleton_progs(const struct bpf_object *obj,
struct bpf_prog_skeleton *progs,
size_t prog_cnt)
{
int i;
for (i = 0; i < prog_cnt; i++) {
struct bpf_program **prog = progs[i].prog;
const char *name = progs[i].name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
pr_warn("failed to find skeleton program '%s'\n", name);
return -ESRCH;
}
}
return 0;
}
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
......@@ -11976,7 +12039,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
.object_name = s->name,
);
struct bpf_object *obj;
int i, err;
int err;
/* Attempt to preserve opts->object_name, unless overridden by user
* explicitly. Overwriting object name for skeletons is discouraged,
......@@ -11999,37 +12062,91 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
*s->obj = obj;
err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map **map = s->maps[i].map;
const char *name = s->maps[i].name;
void **mmaped = s->maps[i].mmaped;
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
pr_warn("failed to find skeleton map '%s'\n", name);
return libbpf_err(-ESRCH);
}
return 0;
}
/* externs shouldn't be pre-setup from user code */
if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
*mmaped = (*map)->mmaped;
int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
{
int err, len, var_idx, i;
const char *var_name;
const struct bpf_map *map;
struct btf *btf;
__u32 map_type_id;
const struct btf_type *map_type, *var_type;
const struct bpf_var_skeleton *var_skel;
struct btf_var_secinfo *var;
if (!s->obj)
return libbpf_err(-EINVAL);
btf = bpf_object__btf(s->obj);
if (!btf) {
pr_warn("subskeletons require BTF at runtime (object %s)\n",
bpf_object__name(s->obj));
return libbpf_err(-errno);
}
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_program **prog = s->progs[i].prog;
const char *name = s->progs[i].name;
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
pr_warn("failed to find skeleton program '%s'\n", name);
return libbpf_err(-ESRCH);
}
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
var_skel = &s->vars[var_idx];
map = *var_skel->map;
map_type_id = bpf_map__btf_value_type_id(map);
map_type = btf__type_by_id(btf, map_type_id);
if (!btf_is_datasec(map_type)) {
pr_warn("type for map '%1$s' is not a datasec: %2$s",
bpf_map__name(map),
__btf_kind_str(btf_kind(map_type)));
return libbpf_err(-EINVAL);
}
len = btf_vlen(map_type);
var = btf_var_secinfos(map_type);
for (i = 0; i < len; i++, var++) {
var_type = btf__type_by_id(btf, var->type);
var_name = btf__name_by_offset(btf, var_type->name_off);
if (strcmp(var_name, var_skel->name) == 0) {
*var_skel->addr = map->mmaped + var->offset;
break;
}
}
}
return 0;
}
void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
{
if (!s)
return;
free(s->maps);
free(s->progs);
free(s->vars);
free(s);
}
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
int i, err;
......
......@@ -1312,6 +1312,35 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
struct bpf_var_skeleton {
const char *name;
struct bpf_map **map;
void **addr;
};
struct bpf_object_subskeleton {
size_t sz; /* size of this struct, for forward/backward compatibility */
const struct bpf_object *obj;
int map_cnt;
int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
struct bpf_map_skeleton *maps;
int prog_cnt;
int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
struct bpf_prog_skeleton *progs;
int var_cnt;
int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */
struct bpf_var_skeleton *vars;
};
LIBBPF_API int
bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
LIBBPF_API void
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
struct gen_loader_opts {
size_t sz; /* size of this struct, for forward/backward compatiblity */
const char *data;
......
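To illustrate the contract of these new structs, here is a hypothetical, hand-written
equivalent of what bpftool gen subskeleton emits for a single .data variable (the map
and variable names are illustrative; generated headers additionally keep the
subskeleton around until the matching __destroy call):

#include <errno.h>
#include <stdlib.h>
#include <bpf/libbpf.h>

struct my_lib {
	struct bpf_map *data_map;
	int *var2;		/* resolved address of the library's global */
};

static int my_lib_open(struct my_lib *out, const struct bpf_object *obj)
{
	struct bpf_object_subskeleton *s;
	int err;

	s = calloc(1, sizeof(*s));
	if (!s)
		return -ENOMEM;
	s->sz = sizeof(*s);
	s->obj = obj;

	/* one map: the final object's .data map (the name is hypothetical,
	 * it is derived from the final object's name)
	 */
	s->map_cnt = 1;
	s->map_skel_sz = sizeof(*s->maps);
	s->maps = calloc(1, s->map_skel_sz);

	/* one variable, resolved against that map's DATASEC at open time */
	s->var_cnt = 1;
	s->var_skel_sz = sizeof(*s->vars);
	s->vars = calloc(1, s->var_skel_sz);
	if (!s->maps || !s->vars) {
		bpf_object__destroy_subskeleton(s);
		return -ENOMEM;
	}

	s->maps[0].name = "test_sub.data";
	s->maps[0].map = &out->data_map;

	s->vars[0].name = "var2";
	s->vars[0].map = &out->data_map;
	s->vars[0].addr = (void **)&out->var2;

	err = bpf_object__open_subskeleton(s);
	bpf_object__destroy_subskeleton(s);	/* frees only the bookkeeping */
	return err;
}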
......@@ -442,6 +442,8 @@ LIBBPF_0.7.0 {
LIBBPF_0.8.0 {
global:
bpf_object__destroy_subskeleton;
bpf_object__open_subskeleton;
libbpf_register_prog_handler;
libbpf_unregister_prog_handler;
bpf_program__attach_kprobe_multi_opts;
......
......@@ -54,6 +54,10 @@ enum libbpf_strict_mode {
*
* Note, in this mode the program pin path will be based on the
* function name instead of section name.
*
* Additionally, routines in the .text section are always considered
* sub-programs. Legacy behavior allows for a single routine in .text
* to be a program.
*/
LIBBPF_STRICT_SEC_NAME = 0x04,
/*
......
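For reference, a minimal sketch of how an application would opt into this stricter
.text handling via the existing strict-mode API (bpftool itself already runs in 1.0
mode, per the cover letter):

#include <bpf/libbpf.h>
#include <bpf/libbpf_legacy.h>

int main(void)
{
	/* Opt into the 1.0 behavior described above: every routine placed in
	 * .text is treated as a subprogram, never as an entry-point program.
	 */
	libbpf_set_strict_mode(LIBBPF_STRICT_SEC_NAME);

	/* ... open and load BPF objects as usual ... */
	return 0;
}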
......@@ -31,6 +31,7 @@ test_tcp_check_syncookie_user
test_sysctl
xdping
test_cpp
*.subskel.h
*.skel.h
*.lskel.h
/no_alu32
......
......@@ -327,7 +327,13 @@ endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h
linked_vars.skel.h linked_maps.skel.h \
test_subskeleton.skel.h test_subskeleton_lib.skel.h
# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
# but that's created as a side-effect of the skel.h generation.
test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
......@@ -405,6 +411,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$(@:.skel.h=.subskel.h)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
......@@ -422,6 +429,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
endif
# ensure we set up tests.h header generation rule just once
......@@ -559,6 +567,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "test_subskeleton.skel.h"
#include "test_subskeleton_lib.subskel.h"
static void subskeleton_lib_setup(struct bpf_object *obj)
{
struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
if (!ASSERT_OK_PTR(lib, "open subskeleton"))
return;
*lib->rodata.var1 = 1;
*lib->data.var2 = 2;
lib->bss.var3->var3_1 = 3;
lib->bss.var3->var3_2 = 4;
test_subskeleton_lib__destroy(lib);
}
static int subskeleton_lib_subresult(struct bpf_object *obj)
{
struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
int result;
if (!ASSERT_OK_PTR(lib, "open subskeleton"))
return -EINVAL;
result = *lib->bss.libout1;
ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");
ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler");
ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler),
"lib_perf_handler", "program name");
ASSERT_OK_PTR(lib->maps.map1, "map1");
ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name");
ASSERT_EQ(*lib->data.var5, 5, "__weak var5");
ASSERT_EQ(*lib->data.var6, 6, "extern var6");
ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");
test_subskeleton_lib__destroy(lib);
return result;
}
void test_subskeleton(void)
{
int err, result;
struct test_subskeleton *skel;
skel = test_subskeleton__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->rodata->rovar1 = 10;
skel->rodata->var1 = 1;
subskeleton_lib_setup(skel->obj);
err = test_subskeleton__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
err = test_subskeleton__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
result = subskeleton_lib_subresult(skel->obj) * 10;
ASSERT_EQ(skel->bss->out1, result, "unexpected calculation");
cleanup:
test_subskeleton__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read, compiler may assume 0 otherwise */
const volatile int rovar1;
int out1;
/* Override weak symbol in test_subskeleton_lib */
int var5 = 5;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
extern int lib_routine(void);
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
(void) CONFIG_BPF_SYSCALL;
out1 = lib_routine() * rovar1;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read */
const volatile int var1;
volatile int var2 = 1;
struct {
int var3_1;
__s64 var3_2;
} var3;
int libout1;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
int var4[4];
__weak int var5 SEC(".data");
/* Fully contained within library extern-and-definition */
extern int var6;
int var7 SEC(".data.custom");
int (*fn_ptr)(void);
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map1 SEC(".maps");
extern struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map2 SEC(".maps");
int lib_routine(void)
{
__u32 key = 1, value = 2;
(void) CONFIG_BPF_SYSCALL;
bpf_map_update_elem(&map2, &key, &value, BPF_ANY);
libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
return libout1;
}
SEC("perf_event")
int lib_perf_handler(struct pt_regs *ctx)
{
return 0;
}
char LICENSE[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int var6 = 6;
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map2 SEC(".maps");
char LICENSE[] SEC("license") = "GPL";