Commit 60911970 authored by Andrii Nakryiko's avatar Andrii Nakryiko

Merge branch 'Subskeleton support for BPF libraries'

Delyan Kratunov says:

====================

In the quest for ever more modularity, a new need has arisen - the ability to
access data associated with a BPF library from a corresponding userspace library.
The catch is that we don't want the userspace library to know about the structure of the
final BPF object that the BPF library is linked into.

In pursuit of this modularity, this patch series introduces *subskeletons.*
Subskeletons are similar in use and design to skeletons with a couple of differences:

1. The generated storage types do not rely on contiguous storage for the library's
variables because they may be interspersed randomly throughout the final BPF object's sections.

2. Subskeletons do not own objects and instead require a loaded bpf_object* to
be passed at runtime in order to be initialized. By extension, symbols are resolved at
runtime by parsing the final object's BTF.

3. Subskeletons allow access to all global variables, programs, and custom maps. They also expose
the internal maps *of the final object*. This allows bpf_var_skeleton objects to contain a bpf_map**
instead of a section name.

Changes since v3:
 - Re-add key/value type lookup for legacy user maps (fixing btf test)
 - Minor cleanups (missed sanitize_identifier call, error messages, formatting)

Changes since v2:
 - Reuse SEC_NAME strict mode flag
 - Init bpf_map->btf_value_type_id on open for internal maps *and* user BTF maps
 - Test custom section names (.data.foo) and overlapping kconfig externs between the final object and the library
 - Minor review comments in gen.c & libbpf.c

Changes since v1:
 - Introduced new strict mode knob for single-routine-in-.text compatibility behavior, which
   disproportionately affects library objects. bpftool works in 1.0 mode so subskeleton generation
   doesn't have to worry about this now.
 - Made bpf_map_btf_value_type_id available earlier and used it wherever applicable.
 - Refactoring in bpftool gen.c per review comments.
 - Subskels now use typeof() for array and func proto globals to avoid the need for runtime split btf.
 - Expanded the subskeleton test to include arrays, custom maps, extern maps, weak symbols, and kconfigs.
 - selftests/bpf/Makefile now generates a subskel.h for every skel.h it would make.

For reference, here is a shortened subskeleton header:

#ifndef __TEST_SUBSKELETON_LIB_SUBSKEL_H__
#define __TEST_SUBSKELETON_LIB_SUBSKEL_H__

struct test_subskeleton_lib {
	struct bpf_object *obj;
	struct bpf_object_subskeleton *subskel;
	struct {
		struct bpf_map *map2;
		struct bpf_map *map1;
		struct bpf_map *data;
		struct bpf_map *rodata;
		struct bpf_map *bss;
		struct bpf_map *kconfig;
	} maps;
	struct {
		struct bpf_program *lib_perf_handler;
	} progs;
	struct test_subskeleton_lib__data {
		int *var6;
		int *var2;
		int *var5;
	} data;
	struct test_subskeleton_lib__rodata {
		int *var1;
	} rodata;
	struct test_subskeleton_lib__bss {
		struct {
			int var3_1;
			__s64 var3_2;
		} *var3;
		int *libout1;
		typeof(int[4]) *var4;
		typeof(int (*)()) *fn_ptr;
	} bss;
	struct test_subskeleton_lib__kconfig {
		_Bool *CONFIG_BPF_SYSCALL;
	} kconfig;

static inline struct test_subskeleton_lib *
test_subskeleton_lib__open(const struct bpf_object *src)
{
	struct test_subskeleton_lib *obj;
	struct bpf_object_subskeleton *s;
	int err;

	...
	s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));
	...

	s->var_cnt = 9;
	...

	s->vars[0].name = "var6";
	s->vars[0].map = &obj->maps.data;
	s->vars[0].addr = (void**) &obj->data.var6;
  ...

	/* maps */
	...

	/* programs */
	s->prog_cnt = 1;
	...

	err = bpf_object__open_subskeleton(s);
  ...
	return obj;
}
#endif /* __TEST_SUBSKELETON_LIB_SUBSKEL_H__ */
====================
Signed-off-by: default avatarAndrii Nakryiko <andrii@kernel.org>
parents 5a5c11ee 3cccbaa0
......@@ -25,6 +25,7 @@ GEN COMMANDS
| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
| **bpftool** **gen help**
......@@ -150,6 +151,30 @@ DESCRIPTION
(non-read-only) data from userspace, with same simplicity
as for BPF side.
**bpftool gen subskeleton** *FILE*
Generate BPF subskeleton C header file for a given *FILE*.
Subskeletons are similar to skeletons, except they do not own
the corresponding maps, programs, or global variables. They
require that the object file used to generate them is already
loaded into a *bpf_object* by some other means.
This functionality is useful when a library is included into a
larger BPF program. A subskeleton for the library would have
access to all objects and globals defined in it, without
having to know about the larger program.
Consequently, there are only two functions defined
for subskeletons:
- **example__open(bpf_object\*)**
Instantiates a subskeleton from an already opened (but not
necessarily loaded) **bpf_object**.
- **example__destroy()**
Frees the storage for the subskeleton but *does not* unload
any BPF programs or maps.
**bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
Generate a minimum BTF file as *OUTPUT*, derived from a given
*INPUT* BTF file, containing all needed BTF types so one, or
......
......@@ -1003,13 +1003,25 @@ _bpftool()
;;
esac
;;
subskeleton)
case $prev in
$command)
_filedir
return 0
;;
*)
_bpftool_once_attr 'name'
return 0
;;
esac
;;
min_core_btf)
_filedir
return 0
;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'object skeleton help min_core_btf' -- "$cur" ) )
COMPREPLY=( $( compgen -W 'object skeleton subskeleton help min_core_btf' -- "$cur" ) )
;;
esac
;;
......
......@@ -64,11 +64,11 @@ static void get_obj_name(char *name, const char *file)
sanitize_identifier(name);
}
static void get_header_guard(char *guard, const char *obj_name)
static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
int i;
sprintf(guard, "__%s_SKEL_H__", obj_name);
sprintf(guard, "__%s_%s__", obj_name, suffix);
for (i = 0; guard[i]; i++)
guard[i] = toupper(guard[i]);
}
......@@ -231,6 +231,17 @@ static const struct btf_type *find_type_for_map(struct btf *btf, const char *map
return NULL;
}
/* Check whether 'map' is an internal, memory-mapped (BPF_F_MMAPABLE) map
 * and, if so, store its sanitized C identifier into 'buf'. Returns true
 * only when the map qualifies and a usable identifier was produced.
 */
static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	if (!bpf_map__is_internal(map))
		return false;
	if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;
	return get_map_ident(map, buf, sz);
}
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
......@@ -247,12 +258,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
if (!bpf_map__is_internal(map))
continue;
if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
continue;
if (!get_map_ident(map, map_ident, sizeof(map_ident)))
if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
......@@ -280,6 +286,96 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
return err;
}
/* True when BTF type 'v' is a pointer whose pointee is a function prototype. */
static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	const struct btf_type *pointee;

	if (!btf_is_ptr(v))
		return false;
	pointee = btf__type_by_id(btf, v->type);
	return btf_is_func_proto(pointee);
}
/* Emit the per-datasec variable-access structs of a subskeleton header.
 *
 * For every memory-mapped internal map (.data, .rodata, .bss, .kconfig and
 * custom .data.* / .rodata.* sections) this prints a
 * "struct <obj_name>__<sec_ident> { ... } <sec_ident>;" member whose fields
 * are *pointers* to the section's non-static global variables. Pointers are
 * used (instead of inline storage, as full skeletons do) because a
 * subskeleton resolves each variable's address at runtime from an already
 * loaded bpf_object.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		/* .kconfig externs keep their modifiers (const volatile);
		 * every other section is emitted with modifiers stripped.
		 */
		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf(" struct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("typeof(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf(" } %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}
static void codegen(const char *template, ...)
{
const char *src, *end;
......@@ -389,11 +485,7 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
", obj_name);
bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_internal(map))
continue;
if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
continue;
if (!get_map_ident(map, map_ident, sizeof(map_ident)))
if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
......@@ -608,11 +700,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
const void *mmap_data = NULL;
size_t mmap_size = 0;
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
if (!bpf_map__is_internal(map) ||
!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
codegen("\
......@@ -671,11 +759,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
bpf_object__for_each_map(map, obj) {
const char *mmap_flags;
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
if (!bpf_map__is_internal(map) ||
!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
......@@ -727,10 +811,95 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
return err;
}
/* Emit the (sub)skeleton code that allocates and fills s->maps.
 *
 * Each entry records the map's name plus the address of the matching
 * 'obj->maps.<ident>' slot so libbpf can wire them up at open time.
 * When 'mmaped' is true (full skeletons) the entries for memory-mapped
 * internal maps also record where to store the map's mmap()'ed address;
 * subskeletons pass false and resolve variables through BTF instead.
 */
static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
	struct bpf_map *map;
	char ident[256];
	size_t i;

	if (!map_cnt)
		return;

	codegen("\
		\n\
		\n\
		/* maps */ \n\
		s->map_cnt = %zu; \n\
		s->map_skel_sz = sizeof(*s->maps); \n\
		s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
		if (!s->maps) \n\
			goto err; \n\
		",
		map_cnt
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		/* maps without a C identifier (e.g. .rodata.str1.1) are skipped */
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			\n\
			s->maps[%zu].name = \"%s\"; \n\
			s->maps[%zu].map = &obj->maps.%s; \n\
			",
			i, bpf_map__name(map), i, ident);
		/* memory-mapped internal maps */
		if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				i, ident);
		}
		i++;
	}
}
/* Emit the (sub)skeleton code that allocates and fills s->progs.
 *
 * Each entry records the program's name plus the address of the matching
 * 'obj->progs.<name>' slot. When 'populate_links' is true (full skeletons),
 * the matching 'obj->links.<name>' slot is recorded too; subskeletons do
 * not own links and pass false.
 */
static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	size_t i;	/* size_t, not int: printed below with %zu */

	if (!prog_cnt)
		return;

	codegen("\
		\n\
		\n\
		/* programs */ \n\
		s->prog_cnt = %zu; \n\
		s->prog_skel_sz = sizeof(*s->progs); \n\
		s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
		if (!s->progs) \n\
			goto err; \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
			\n\
			s->progs[%1$zu].name = \"%2$s\"; \n\
			s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
			",
			i, bpf_program__name(prog));
		if (populate_links) {
			codegen("\
				\n\
				s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
......@@ -821,7 +990,7 @@ static int do_skeleton(int argc, char **argv)
prog_cnt++;
}
get_header_guard(header_guard, obj_name);
get_header_guard(header_guard, obj_name, "SKEL_H");
if (use_loader) {
codegen("\
\n\
......@@ -1024,66 +1193,10 @@ static int do_skeleton(int argc, char **argv)
",
obj_name
);
if (map_cnt) {
codegen("\
\n\
\n\
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) \n\
goto err; \n\
",
map_cnt
);
i = 0;
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
codegen("\
\n\
\n\
s->maps[%zu].name = \"%s\"; \n\
s->maps[%zu].map = &obj->maps.%s; \n\
",
i, bpf_map__name(map), i, ident);
/* memory-mapped internal maps */
if (bpf_map__is_internal(map) &&
(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
i, ident);
}
i++;
}
}
if (prog_cnt) {
codegen("\
\n\
\n\
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) \n\
goto err; \n\
",
prog_cnt
);
i = 0;
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
\n\
s->progs[%1$zu].name = \"%2$s\"; \n\
s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
s->progs[%1$zu].link = &obj->links.%2$s;\n\
",
i, bpf_program__name(prog));
i++;
}
}
codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
codegen("\
\n\
\n\
......@@ -1141,6 +1254,311 @@ static int do_skeleton(int argc, char **argv)
return err;
}
/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
static int do_subskeleton(int argc, char **argv)
{
	/* room for the sanitized object name plus the "__SUBSKEL_H__" suffix */
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	/* optional "name OBJECT_NAME" overrides the name derived from the
	 * file name
	 */
	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	/* the light skeleton loader has no subskeleton counterpart */
	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	/* subskeletons resolve everything through BTF, so it is mandatory */
	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			/* static variables are not exposed through subskeletons */
			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object *obj; \n\
			struct bpf_object_subskeleton *subskel; \n\
		", obj_name, header_guard);

	/* unlike full skeletons, subskel map/prog members are just pointers
	 * into the final object, filled in at open time
	 */
	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
				bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
		static inline struct %1$s *open(const struct bpf_object *src);\n\
		static inline void destroy(struct %1$s *skel); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static inline void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			if (skel->subskel) \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(const struct bpf_object *src) \n\
		{ \n\
			struct %1$s *obj; \n\
			struct bpf_object_subskeleton *s; \n\
			int err; \n\
		\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				goto err; \n\
			} \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) { \n\
				errno = ENOMEM; \n\
				goto err; \n\
			} \n\
			s->sz = sizeof(*s); \n\
			s->obj = src; \n\
			s->var_skel_sz = sizeof(*s->vars); \n\
			obj->subskel = s; \n\
		\n\
			/* vars */ \n\
			s->var_cnt = %2$d; \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) { \n\
				errno = ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		obj_name, var_cnt
	);
	/* NOTE(review): var_cnt is size_t but printed with %2$d; also, in the
	 * generated err: path 'err' may be read before bpf_object__open_subskeleton
	 * assigns it (calloc-failure gotos) — confirm against upstream.
	 */

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator i.e. maps%s becomes maps.data
			 */
			codegen("\
			\n\
			\n\
			s->vars[%3$d].name = \"%1$s\"; \n\
			s->vars[%3$d].map = &obj->maps.%2$s; \n\
			s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
			", var_name, ident, var_idx);
			var_idx++;
		}
	}

	/* subskeletons neither mmap maps nor own links */
	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
		\n\
			err = bpf_object__open_subskeleton(s); \n\
			if (err) \n\
				goto err; \n\
		\n\
			return obj; \n\
		err: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */ \n\
		\n\
		#endif /* %2$s */ \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	/* bpf_object__close(NULL) and close(fd) are safe on all paths here */
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
static int do_object(int argc, char **argv)
{
struct bpf_linker *linker;
......@@ -1192,6 +1610,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
" %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
" %1$s %2$s help\n"
"\n"
......@@ -1788,6 +2207,7 @@ static int do_min_core_btf(int argc, char **argv)
static const struct cmd cmds[] = {
{ "object", do_object },
{ "skeleton", do_skeleton },
{ "subskeleton", do_subskeleton },
{ "min_core_btf", do_min_core_btf},
{ "help", do_help },
{ 0 }
......
......@@ -1517,6 +1517,9 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
return strdup(map_name);
}
static int
bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
const char *real_name, int sec_idx, void *data, size_t data_sz)
......@@ -1564,6 +1567,9 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
return err;
}
/* failures are fine because of maps like .rodata.str1.1 */
(void) bpf_map_find_btf_info(obj, map);
if (data)
memcpy(map->mmaped, data, data_sz);
......@@ -2046,6 +2052,9 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
}
memcpy(&map->def, def, sizeof(struct bpf_map_def));
}
/* btf info may not exist but fill it in if it does exist */
(void) bpf_map_find_btf_info(obj, map);
}
return 0;
}
......@@ -2534,6 +2543,10 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
fill_map_from_def(map->inner_map, &inner_def);
}
err = bpf_map_find_btf_info(obj, map);
if (err)
return err;
return 0;
}
......@@ -3832,7 +3845,14 @@ static bool prog_is_subprog(const struct bpf_object *obj,
* .text programs are subprograms (even if they are not called from
* other programs), because libbpf never explicitly supported mixing
* SEC()-designated BPF programs and .text entry-point BPF programs.
*
* In libbpf 1.0 strict mode, we always consider .text
* programs to be subprograms.
*/
if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
return prog->sec_idx == obj->efile.text_shndx;
return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}
......@@ -4866,7 +4886,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map__is_struct_ops(map))
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
if (obj->btf && btf__fd(obj->btf) >= 0) {
create_attr.btf_fd = btf__fd(obj->btf);
create_attr.btf_key_type_id = map->btf_key_type_id;
create_attr.btf_value_type_id = map->btf_value_type_id;
......@@ -11969,6 +11989,49 @@ int libbpf_num_possible_cpus(void)
return tmp_cpus;
}
/* Resolve each skeleton map entry against 'obj' by name, storing the found
 * bpf_map pointer and, when requested, the map's mmap()'ed data address.
 * Returns 0 on success, -ESRCH if any named map is missing.
 */
static int populate_skeleton_maps(const struct bpf_object *obj,
				  struct bpf_map_skeleton *maps,
				  size_t map_cnt)
{
	size_t idx;

	for (idx = 0; idx < map_cnt; idx++) {
		struct bpf_map_skeleton *skel_map = &maps[idx];
		struct bpf_map *found;

		found = bpf_object__find_map_by_name(obj, skel_map->name);
		*skel_map->map = found;
		if (!found) {
			pr_warn("failed to find skeleton map '%s'\n", skel_map->name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (skel_map->mmaped && found->libbpf_type != LIBBPF_MAP_KCONFIG)
			*skel_map->mmaped = found->mmaped;
	}
	return 0;
}
/* Resolve each skeleton program entry against 'obj' by name.
 * Returns 0 on success, -ESRCH if any named program is missing.
 */
static int populate_skeleton_progs(const struct bpf_object *obj,
				   struct bpf_prog_skeleton *progs,
				   size_t prog_cnt)
{
	size_t idx;

	for (idx = 0; idx < prog_cnt; idx++) {
		struct bpf_prog_skeleton *skel_prog = &progs[idx];
		struct bpf_program *found;

		found = bpf_object__find_program_by_name(obj, skel_prog->name);
		*skel_prog->prog = found;
		if (!found) {
			pr_warn("failed to find skeleton program '%s'\n", skel_prog->name);
			return -ESRCH;
		}
	}
	return 0;
}
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
......@@ -11976,7 +12039,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
.object_name = s->name,
);
struct bpf_object *obj;
int i, err;
int err;
/* Attempt to preserve opts->object_name, unless overriden by user
* explicitly. Overwriting object name for skeletons is discouraged,
......@@ -11999,37 +12062,91 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
*s->obj = obj;
err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map **map = s->maps[i].map;
const char *name = s->maps[i].name;
void **mmaped = s->maps[i].mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
pr_warn("failed to find skeleton map '%s'\n", name);
return libbpf_err(-ESRCH);
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
/* externs shouldn't be pre-setup from user code */
if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
*mmaped = (*map)->mmaped;
return 0;
}
/* Resolve a subskeleton against an already-opened bpf_object (s->obj).
 *
 * Populates the caller-registered map and program pointers by name lookup,
 * then resolves every registered variable's address by walking the BTF
 * datasec describing the variable's containing map. The subskeleton borrows
 * s->obj; it never takes ownership.
 *
 * Returns 0 on success, a negative error (via libbpf_err()) on failure.
 */
int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
{
	int err, len, var_idx, i;
	const char *var_name;
	const struct bpf_map *map;
	struct btf *btf;
	__u32 map_type_id;
	const struct btf_type *map_type, *var_type;
	const struct bpf_var_skeleton *var_skel;
	struct btf_var_secinfo *var;

	if (!s->obj)
		return libbpf_err(-EINVAL);

	btf = bpf_object__btf(s->obj);
	if (!btf) {
		pr_warn("subskeletons require BTF at runtime (object %s)\n",
			bpf_object__name(s->obj));
		return libbpf_err(-errno);
	}

	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton maps: %d\n", err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
	if (err) {
		/* fixed copy-paste: this message used to say "maps" */
		pr_warn("failed to populate subskeleton progs: %d\n", err);
		return libbpf_err(err);
	}

	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
		var_skel = &s->vars[var_idx];
		map = *var_skel->map;
		map_type_id = bpf_map__btf_value_type_id(map);
		map_type = btf__type_by_id(btf, map_type_id);

		if (!btf_is_datasec(map_type)) {
			/* fixed: message was missing its trailing newline */
			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
				bpf_map__name(map),
				__btf_kind_str(btf_kind(map_type)));
			return libbpf_err(-EINVAL);
		}

		len = btf_vlen(map_type);
		var = btf_var_secinfos(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);
			if (strcmp(var_name, var_skel->name) == 0) {
				/* mmap()'ed base + in-section offset = runtime address */
				*var_skel->addr = map->mmaped + var->offset;
				break;
			}
		}
	}
	return 0;
}
/* Release the storage owned by a subskeleton. The referenced bpf_object,
 * maps, and programs are NOT touched — the subskeleton never owned them.
 */
void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
{
	if (!s)
		return;
	/* the three arrays are independently allocated; free all of them */
	free(s->vars);
	free(s->progs);
	free(s->maps);
	free(s);
}
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
int i, err;
......
......@@ -1312,6 +1312,35 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
/* Runtime description of one subskeleton global variable: how to find it
 * (name + containing datasec map) and where to store the resolved address.
 */
struct bpf_var_skeleton {
	const char *name;		/* variable name as recorded in BTF */
	struct bpf_map **map;		/* datasec map containing the variable */
	void **addr;			/* out: resolved address after open */
};

struct bpf_object_subskeleton {
	size_t sz;			/* size of this struct, for forward/backward compatibility */

	const struct bpf_object *obj;	/* borrowed, never owned, by the subskeleton */

	int map_cnt;
	int map_skel_sz;		/* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz;		/* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;

	int var_cnt;
	int var_skel_sz;		/* sizeof(struct bpf_var_skeleton) */
	struct bpf_var_skeleton *vars;
};
LIBBPF_API int
bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
LIBBPF_API void
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
struct gen_loader_opts {
size_t sz; /* size of this struct, for forward/backward compatiblity */
const char *data;
......
......@@ -442,6 +442,8 @@ LIBBPF_0.7.0 {
LIBBPF_0.8.0 {
global:
bpf_object__destroy_subskeleton;
bpf_object__open_subskeleton;
libbpf_register_prog_handler;
libbpf_unregister_prog_handler;
bpf_program__attach_kprobe_multi_opts;
......
......@@ -54,6 +54,10 @@ enum libbpf_strict_mode {
*
* Note, in this mode the program pin path will be based on the
* function name instead of section name.
*
* Additionally, routines in the .text section are always considered
* sub-programs. Legacy behavior allows for a single routine in .text
* to be a program.
*/
LIBBPF_STRICT_SEC_NAME = 0x04,
/*
......
......@@ -31,6 +31,7 @@ test_tcp_check_syncookie_user
test_sysctl
xdping
test_cpp
*.subskel.h
*.skel.h
*.lskel.h
/no_alu32
......
......@@ -327,7 +327,13 @@ endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h
linked_vars.skel.h linked_maps.skel.h \
test_subskeleton.skel.h test_subskeleton_lib.skel.h
# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
# but that's created as a side-effect of the skel.h generation.
test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
......@@ -405,6 +411,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$(@:.skel.h=.subskel.h)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
......@@ -422,6 +429,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
endif
# ensure we set up tests.h header generation rule just once
......@@ -559,6 +567,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "test_subskeleton.skel.h"
#include "test_subskeleton_lib.subskel.h"
/* Initialize the library's globals through its subskeleton before the
 * final object is loaded. Resolution happens against obj's BTF at open
 * time, so this works no matter where the linker placed the variables.
 */
static void subskeleton_lib_setup(struct bpf_object *obj)
{
	struct test_subskeleton_lib *sub;

	sub = test_subskeleton_lib__open(obj);
	if (!ASSERT_OK_PTR(sub, "open subskeleton"))
		return;

	/* Seed the values lib_routine() later sums up. */
	*sub->rodata.var1 = 1;
	*sub->data.var2 = 2;
	sub->bss.var3->var3_1 = 3;
	sub->bss.var3->var3_2 = 4;

	test_subskeleton_lib__destroy(sub);
}
/* Validate the library's state after the program ran and return the
 * library-side result (libout1). Returns -EINVAL if the subskeleton
 * cannot be opened against obj.
 */
static int subskeleton_lib_subresult(struct bpf_object *obj)
{
	struct test_subskeleton_lib *sub;
	int sum;

	sub = test_subskeleton_lib__open(obj);
	if (!ASSERT_OK_PTR(sub, "open subskeleton"))
		return -EINVAL;

	/* lib_routine() sums var1..var6 (1+2+3+4+5+6). */
	sum = *sub->bss.libout1;
	ASSERT_EQ(sum, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");

	/* Programs and custom maps are reachable through the subskeleton. */
	ASSERT_OK_PTR(sub->progs.lib_perf_handler, "lib_perf_handler");
	ASSERT_STREQ(bpf_program__name(sub->progs.lib_perf_handler),
		     "lib_perf_handler", "program name");
	ASSERT_OK_PTR(sub->maps.map1, "map1");
	ASSERT_STREQ(bpf_map__name(sub->maps.map1), "map1", "map name");

	/* Weak/extern variables and kconfig externs resolve too. */
	ASSERT_EQ(*sub->data.var5, 5, "__weak var5");
	ASSERT_EQ(*sub->data.var6, 6, "extern var6");
	ASSERT_TRUE(*sub->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");

	test_subskeleton_lib__destroy(sub);
	return sum;
}
/* End-to-end subskeleton test: set up both the main object's and the
 * library's variables pre-load, trigger the tracepoint, then verify the
 * main result equals the library subresult scaled by rovar1.
 */
void test_subskeleton(void)
{
	struct test_subskeleton *skel;
	int err, expected;

	skel = test_subskeleton__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* Main-object read-only data, set before load. */
	skel->rodata->rovar1 = 10;
	skel->rodata->var1 = 1;
	subskeleton_lib_setup(skel->obj);

	err = test_subskeleton__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	err = test_subskeleton__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	/* handler1 computes out1 = lib_routine() * rovar1. */
	expected = subskeleton_lib_subresult(skel->obj) * 10;
	ASSERT_EQ(skel->bss->out1, expected, "unexpected calculation");

cleanup:
	test_subskeleton__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read, compiler may assume 0 otherwise */
const volatile int rovar1;
/* Result written by handler1; read back from userspace via skel->bss. */
int out1;
/* Override weak symbol in test_subskeleton_lib */
int var5 = 5;
/* Kconfig extern; resolved by libbpf at load time. */
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
/* Provided by the linked library object (test_subskeleton_lib). */
extern int lib_routine(void);
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
	int scaled;

	/* Reference the kconfig extern so it is retained in the object. */
	(void) CONFIG_BPF_SYSCALL;

	/* Scale the library's subresult by the main object's rodata knob. */
	scaled = lib_routine() * rovar1;
	out1 = scaled;
	return 0;
}
char LICENSE[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read */
const volatile int var1;
volatile int var2 = 1;
/* Struct-typed global; fields set from userspace via the subskeleton. */
struct {
	int var3_1;
	__s64 var3_2;
} var3;
/* Library-side result, read back by the userspace test. */
int libout1;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
/* Unused array; presumably exercises array handling in subskeleton
 * codegen — TODO confirm.
 */
int var4[4];
/* Weak: overridden by the strong var5 definition in test_subskeleton. */
__weak int var5 SEC(".data");
/* Fully contained within library extern-and-definition */
extern int var6;
/* Variable in a custom .data.* section. */
int var7 SEC(".data.custom");
/* Unused function pointer; presumably exercises pointer-typed globals
 * in subskeleton codegen — TODO confirm.
 */
int (*fn_ptr)(void);
/* Custom map owned by this library. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u32);
	__type(value, __u32);
	__uint(max_entries, 16);
} map1 SEC(".maps");
/* Map declared extern here; defined in test_subskeleton_lib2. */
extern struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u32);
	__type(value, __u32);
	__uint(max_entries, 16);
} map2 SEC(".maps");
/* Sum every library variable into libout1 and return it. Also touches
 * the extern map2 so linking against its definition is exercised.
 */
int lib_routine(void)
{
	__u32 k = 1, v = 2;

	/* Keep the kconfig extern referenced. */
	(void) CONFIG_BPF_SYSCALL;

	bpf_map_update_elem(&map2, &k, &v, BPF_ANY);

	libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
	return libout1;
}
SEC("perf_event")
int lib_perf_handler(struct pt_regs *ctx)
{
	/* No-op program; exists so the subskeleton test can look it up
	 * by name via lib->progs.lib_perf_handler.
	 */
	return 0;
}
char LICENSE[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* Strong definition for the var6 extern declared in test_subskeleton_lib. */
int var6 = 6;
/* Definition for the map2 extern declared in test_subskeleton_lib;
 * the declarations must match for linking to succeed.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u32);
	__type(value, __u32);
	__uint(max_entries, 16);
} map2 SEC(".maps");
char LICENSE[] SEC("license") = "GPL";
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment