Commit a6ed02ca authored by KP Singh, committed by Alexei Starovoitov

libbpf: Load btf_vmlinux only once per object.

As more program types (TRACING, STRUCT_OPS, and the upcoming LSM) use
vmlinux BTF information, loading vmlinux BTF separately for every program
in an object is sub-optimal. The fix was originally proposed in:

   https://lore.kernel.org/bpf/CAEf4BzZodr3LKJuM7QwD38BiEH02Cc1UbtnGpVkCJ00Mf+V_Qg@mail.gmail.com/

btf_vmlinux is now populated in the object, just before its programs are
loaded, if any of them requires it, and is freed once all programs finish
loading.
Reported-by: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Signed-off-by: KP Singh <kpsingh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Brendan Jackman <jackmanb@chromium.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200117212825.11755-1-kpsingh@chromium.org
parent c701917e
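
Before the diff, here is a minimal, self-contained sketch of the pattern the
patch moves to: parse kernel BTF at most once per object, share it across all
programs in the object, and free it after load. This is illustration only, not
libbpf code; obj_t, prog_t, and parse_kernel_btf() below are hypothetical
stand-ins for bpf_object, bpf_program, and libbpf_find_kernel_btf().

/* Hypothetical model of the btf_vmlinux lifecycle introduced by this patch.
 * All names here are illustrative stand-ins, not libbpf APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct kernel_btf { int dummy; };           /* stands in for struct btf */

struct prog_t { bool needs_vmlinux_btf; };  /* stands in for bpf_program */

struct obj_t {                              /* stands in for bpf_object */
	struct prog_t *progs;
	int nr_progs;
	struct kernel_btf *btf_vmlinux;     /* shared, loaded at most once */
};

static struct kernel_btf *parse_kernel_btf(void)
{
	/* In libbpf this is libbpf_find_kernel_btf(): an expensive parse of
	 * /sys/kernel/btf/vmlinux (or a vmlinux image on disk).
	 */
	return calloc(1, sizeof(struct kernel_btf));
}

/* Mirrors the idea of bpf_object__load_vmlinux_btf(): load kernel BTF once
 * if any program in the object needs it, instead of once per program.
 */
static int load_vmlinux_btf_once(struct obj_t *obj)
{
	for (int i = 0; i < obj->nr_progs; i++) {
		if (!obj->progs[i].needs_vmlinux_btf)
			continue;
		obj->btf_vmlinux = parse_kernel_btf();
		return obj->btf_vmlinux ? 0 : -1;
	}
	return 0; /* no program needs it; leave btf_vmlinux NULL */
}

static int load_object(struct obj_t *obj)
{
	int err = load_vmlinux_btf_once(obj);

	/* ... struct_ops map init and attach BTF ID resolution would happen
	 * here, all consuming obj->btf_vmlinux instead of re-parsing ...
	 */

	free(obj->btf_vmlinux);      /* freed after programs finish loading */
	obj->btf_vmlinux = NULL;
	return err;
}

int main(void)
{
	struct prog_t progs[] = { { true }, { true }, { false } };
	struct obj_t obj = { progs, 3, NULL };

	if (load_object(&obj))
		return 1;
	printf("kernel BTF parsed at most once for %d programs\n", obj.nr_progs);
	return 0;
}

With this shape, the expensive vmlinux BTF parse happens at most once per
object load, and not at all when no program in the object needs kernel BTF.
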
@@ -385,6 +385,10 @@ struct bpf_object {
 	struct list_head list;

 	struct btf *btf;
+	/* Parse and load BTF vmlinux if any of the programs in the object need
+	 * it at load time.
+	 */
+	struct btf *btf_vmlinux;
 	struct btf_ext *btf_ext;

 	void *priv;
@@ -633,7 +637,8 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t,
 }

 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
-#define STRUCT_OPS_VALUE_PREFIX_LEN (sizeof(STRUCT_OPS_VALUE_PREFIX) - 1)
+static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
+				   const char *name, __u32 kind);

 static int
 find_struct_ops_kern_types(const struct btf *btf, const char *tname,
@@ -644,7 +649,6 @@ find_struct_ops_kern_types(const struct btf *btf, const char *tname,
 	const struct btf_type *kern_type, *kern_vtype;
 	const struct btf_member *kern_data_member;
 	__s32 kern_vtype_id, kern_type_id;
-	char vtname[128] = STRUCT_OPS_VALUE_PREFIX;
 	__u32 i;

 	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
@@ -660,13 +664,11 @@ find_struct_ops_kern_types(const struct btf *btf, const char *tname,
 	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
 	 * btf_vmlinux.
 	 */
-	strncat(vtname + STRUCT_OPS_VALUE_PREFIX_LEN, tname,
-		sizeof(vtname) - STRUCT_OPS_VALUE_PREFIX_LEN - 1);
-	kern_vtype_id = btf__find_by_name_kind(btf, vtname,
-					       BTF_KIND_STRUCT);
+	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
+						tname, BTF_KIND_STRUCT);
 	if (kern_vtype_id < 0) {
-		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
-			vtname);
+		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
+			STRUCT_OPS_VALUE_PREFIX, tname);
 		return kern_vtype_id;
 	}

 	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
@@ -683,8 +685,8 @@ find_struct_ops_kern_types(const struct btf *btf, const char *tname,
 			break;
 	}
 	if (i == btf_vlen(kern_vtype)) {
-		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
-			tname, vtname);
+		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
+			tname, STRUCT_OPS_VALUE_PREFIX, tname);
 		return -EINVAL;
 	}
@@ -835,7 +837,6 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,

 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
 {
-	struct btf *kern_btf = NULL;
 	struct bpf_map *map;
 	size_t i;
 	int err;
@@ -846,20 +847,12 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
 		if (!bpf_map__is_struct_ops(map))
			continue;

-		if (!kern_btf) {
-			kern_btf = libbpf_find_kernel_btf();
-			if (IS_ERR(kern_btf))
-				return PTR_ERR(kern_btf);
-		}
-
-		err = bpf_map__init_kern_struct_ops(map, obj->btf, kern_btf);
-		if (err) {
-			btf__free(kern_btf);
+		err = bpf_map__init_kern_struct_ops(map, obj->btf,
+						    obj->btf_vmlinux);
+		if (err)
 			return err;
-		}
 	}

-	btf__free(kern_btf);
 	return 0;
 }
@@ -2357,6 +2350,41 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
 	return 0;
 }

+static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
+{
+	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
+		return true;
+
+	/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
+	 * also need vmlinux BTF
+	 */
+	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
+		return true;
+
+	return false;
+}
+
+static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	int err;
+
+	bpf_object__for_each_program(prog, obj) {
+		if (libbpf_prog_needs_vmlinux_btf(prog)) {
+			obj->btf_vmlinux = libbpf_find_kernel_btf();
+			if (IS_ERR(obj->btf_vmlinux)) {
+				err = PTR_ERR(obj->btf_vmlinux);
+				pr_warn("Error loading vmlinux BTF: %d\n", err);
+				obj->btf_vmlinux = NULL;
+				return err;
+			}
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 {
 	int err = 0;
@@ -4884,18 +4912,14 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	return ret;
 }

-static int libbpf_find_attach_btf_id(const char *name,
-				     enum bpf_attach_type attach_type,
-				     __u32 attach_prog_fd);
+static int libbpf_find_attach_btf_id(struct bpf_program *prog);

 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 {
 	int err = 0, fd, i, btf_id;

 	if (prog->type == BPF_PROG_TYPE_TRACING) {
-		btf_id = libbpf_find_attach_btf_id(prog->section_name,
-						   prog->expected_attach_type,
-						   prog->attach_prog_fd);
+		btf_id = libbpf_find_attach_btf_id(prog);
 		if (btf_id <= 0)
 			return btf_id;
 		prog->attach_btf_id = btf_id;
@@ -5273,10 +5297,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
 	err = err ? : bpf_object__sanitize_and_load_btf(obj);
 	err = err ? : bpf_object__sanitize_maps(obj);
+	err = err ? : bpf_object__load_vmlinux_btf(obj);
 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
 	err = err ? : bpf_object__create_maps(obj);
 	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
 	err = err ? : bpf_object__load_progs(obj, attr->log_level);
+
+	btf__free(obj->btf_vmlinux);
+	obj->btf_vmlinux = NULL;
+
 	if (err)
 		goto out;
@@ -6497,34 +6526,52 @@ static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
 	return -EINVAL;
 }

-#define BTF_PREFIX "btf_trace_"
+#define BTF_TRACE_PREFIX "btf_trace_"
+#define BTF_MAX_NAME_SIZE 128
+
+static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
+				   const char *name, __u32 kind)
+{
+	char btf_type_name[BTF_MAX_NAME_SIZE];
+	int ret;
+
+	ret = snprintf(btf_type_name, sizeof(btf_type_name),
+		       "%s%s", prefix, name);
+	/* snprintf returns the number of characters written excluding the
+	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
+	 * indicates truncation.
+	 */
+	if (ret < 0 || ret >= sizeof(btf_type_name))
+		return -ENAMETOOLONG;
+	return btf__find_by_name_kind(btf, btf_type_name, kind);
+}
+
+static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
+					enum bpf_attach_type attach_type)
+{
+	int err;
+
+	if (attach_type == BPF_TRACE_RAW_TP)
+		err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
					      BTF_KIND_TYPEDEF);
+	else
+		err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+
+	return err;
+}
+
 int libbpf_find_vmlinux_btf_id(const char *name,
 			       enum bpf_attach_type attach_type)
 {
-	struct btf *btf = libbpf_find_kernel_btf();
-	char raw_tp_btf[128] = BTF_PREFIX;
-	char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
-	const char *btf_name;
-	int err = -EINVAL;
-	__u32 kind;
+	struct btf *btf;

+	btf = libbpf_find_kernel_btf();
 	if (IS_ERR(btf)) {
 		pr_warn("vmlinux BTF is not found\n");
 		return -EINVAL;
 	}

-	if (attach_type == BPF_TRACE_RAW_TP) {
-		/* prepend "btf_trace_" prefix per kernel convention */
-		strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
-		btf_name = raw_tp_btf;
-		kind = BTF_KIND_TYPEDEF;
-	} else {
-		btf_name = name;
-		kind = BTF_KIND_FUNC;
-	}
-	err = btf__find_by_name_kind(btf, btf_name, kind);
-	btf__free(btf);
-	return err;
+	return __find_vmlinux_btf_id(btf, name, attach_type);
 }
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
...@@ -6560,10 +6607,11 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) ...@@ -6560,10 +6607,11 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
return err; return err;
} }
static int libbpf_find_attach_btf_id(const char *name, static int libbpf_find_attach_btf_id(struct bpf_program *prog)
enum bpf_attach_type attach_type,
__u32 attach_prog_fd)
{ {
enum bpf_attach_type attach_type = prog->expected_attach_type;
__u32 attach_prog_fd = prog->attach_prog_fd;
const char *name = prog->section_name;
int i, err; int i, err;
if (!name) if (!name)
@@ -6578,8 +6626,9 @@ static int libbpf_find_attach_btf_id(const char *name,
 			err = libbpf_find_prog_btf_id(name + section_defs[i].len,
 						      attach_prog_fd);
 		else
-			err = libbpf_find_vmlinux_btf_id(name + section_defs[i].len,
-							 attach_type);
+			err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
						    name + section_defs[i].len,
						    attach_type);
 		if (err <= 0)
 			pr_warn("%s is not found in vmlinux BTF\n", name);
 		return err;