Commit 93265a0b authored by Andrii Nakryiko

Merge branch 'bpf-resilient-split-btf-followups'

Alan Maguire says:

====================
bpf: resilient split BTF followups

Follow-up to the resilient split BTF series [1]:

- cleaning up libbpf relocation code (patch 1);
- adding 'struct module' support for base BTF data (patch 2);
- splitting out field iteration code into separate file (patch 3);
- sharing libbpf relocation code with the kernel (patch 4);
- adding a kbuild --btf_features flag to generate distilled base
  BTF in the module-specific case where KBUILD_EXTMOD is true
  (patch 5); and
- adding test coverage for module-based kfunc dtor (patch 6)

Generation of distilled base BTF for modules requires the pahole patch
at [2], but without it we just won't get distilled base BTF (and thus BTF
relocation on module load) for bpf_testmod.ko.
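
As a rough orientation, the sketch below shows how the shared relocation entry
point from patch 4 might be driven when a module carrying distilled base BTF is
loaded. The helper name and the ownership of the returned id map are
assumptions for illustration, not taken from the patches.

/* Hypothetical sketch; the real call site is the kernel's module BTF parsing. */
static int relocate_module_btf(struct btf *mod_btf, const struct btf *vmlinux_btf)
{
	__u32 *id_map = NULL;
	int err;

	/* Rewrite the split BTF's type IDs and string offsets so they refer
	 * to the running kernel's vmlinux BTF rather than the distilled base
	 * embedded in the module at build time.
	 */
	err = btf_relocate(mod_btf, vmlinux_btf, &id_map);
	if (err)
		return err;

	/* id_map records how distilled-base IDs were mapped to vmlinux BTF
	 * IDs; whether the caller keeps or frees it is assumed here.
	 */
	kvfree(id_map);
	return 0;
}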

Changes since v1 [3]:

- fixed line lengths and made comparison an explicit == 0 (Andrii, patch 1)
- moved btf_iter.c changes to separate patch (Andrii, patch 3)
- grouped common targets in kernel/bpf/Makefile (Andrii, patch 4)
- updated bpf_testmod ctx alloc to use GFP_ATOMIC, and updated dtor
  selftest to use map-based dtor cleanup (Eduard, patch 6)

[1] https://lore.kernel.org/bpf/20240613095014.357981-1-alan.maguire@oracle.com/
[2] https://lore.kernel.org/bpf/20240517102714.4072080-1-alan.maguire@oracle.com/
[3] https://lore.kernel.org/bpf/20240618162449.809994-1-alan.maguire@oracle.com/
====================

Link: https://lore.kernel.org/r/20240620091733.1967885-1-alan.maguire@oracle.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents cd387ce5 47a8cf0c
......@@ -140,6 +140,7 @@ extern const struct file_operations btf_fops;
const char *btf_get_name(const struct btf *btf);
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
const struct btf_header *btf_header(const struct btf *btf);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
......@@ -212,8 +213,10 @@ int btf_get_fd_by_id(u32 id);
u32 btf_obj_id(const struct btf *btf);
bool btf_is_kernel(const struct btf *btf);
bool btf_is_module(const struct btf *btf);
bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
struct btf *btf_base_btf(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
......@@ -339,6 +342,11 @@ static inline u8 btf_int_offset(const struct btf_type *t)
return BTF_INT_OFFSET(*(u32 *)(t + 1));
}
static inline __u8 btf_int_bits(const struct btf_type *t)
{
return BTF_INT_BITS(*(__u32 *)(t + 1));
}
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
......@@ -478,6 +486,11 @@ static inline struct btf_param *btf_params(const struct btf_type *t)
return (struct btf_param *)(t + 1);
}
static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
{
return (struct btf_decl_tag *)(t + 1);
}
static inline int btf_id_cmp_func(const void *a, const void *b)
{
const int *pa = a, *pb = b;
......@@ -515,9 +528,38 @@ static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *
}
#endif
enum btf_field_iter_kind {
BTF_FIELD_ITER_IDS,
BTF_FIELD_ITER_STRS,
};
struct btf_field_desc {
/* once-per-type offsets */
int t_off_cnt, t_offs[2];
/* member struct size, or zero, if no members */
int m_sz;
/* repeated per-member offsets */
int m_off_cnt, m_offs[1];
};
struct btf_field_iter {
struct btf_field_desc desc;
void *p;
int m_idx;
int off_idx;
int vlen;
};
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids);
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
enum btf_field_iter_kind iter_kind);
__u32 *btf_field_iter_next(struct btf_field_iter *it);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
......@@ -544,6 +586,28 @@ static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
{
return NULL;
}
static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
}
static inline int btf_relocate(void *log, struct btf *btf, const struct btf *base_btf,
__u32 **map_ids)
{
return -EOPNOTSUPP;
}
static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
enum btf_field_iter_kind iter_kind)
{
return -EOPNOTSUPP;
}
static inline __u32 *btf_field_iter_next(struct btf_field_iter *it)
{
return NULL;
}
static inline const char *btf_name_by_offset(const struct btf *btf,
u32 offset)
{
......
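
As a usage illustration of the iterator API declared above, the helper below
walks and rewrites every type-ID reference of a single BTF type;
remap_type_ids and id_map are hypothetical names made up for the example.

/* Hypothetical helper: remap every type ID referenced by @t via @id_map. */
static int remap_type_ids(struct btf_type *t, const __u32 *id_map)
{
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* yields a pointer to each type-ID field (t->type, array element and
	 * index types, member types, func proto parameter types, ...)
	 */
	while ((id = btf_field_iter_next(&it)))
		*id = id_map[*id];

	return 0;
}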
......@@ -509,7 +509,9 @@ struct module {
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
unsigned int btf_data_size;
unsigned int btf_base_data_size;
void *btf_data;
void *btf_base_data;
#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
......
......@@ -50,5 +50,11 @@ endif
obj-$(CONFIG_BPF_PRELOAD) += preload/
obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE
obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
# Some source files are common to libbpf.
vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf
$(obj)/%.o: %.c FORCE
$(call if_changed_rule,cc_o_c)
......@@ -2166,6 +2166,8 @@ static int find_module_sections(struct module *mod, struct load_info *info)
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
mod->btf_base_data = any_section_objs(info, ".BTF.base", 1,
&mod->btf_base_data_size);
#endif
#ifdef CONFIG_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
......@@ -2590,8 +2592,9 @@ static noinline int do_init_module(struct module *mod)
}
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
/* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
/* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */
mod->btf_data = NULL;
mod->btf_base_data = NULL;
#endif
/*
* We want to free module_init, but be aware that kallsyms may be
......
......@@ -21,8 +21,13 @@ else
# Switch to using --btf_features for v1.26 and later.
pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs
ifneq ($(KBUILD_EXTMOD),)
module-pahole-flags-$(call test-ge, $(pahole-ver), 126) += --btf_features=distilled_base
endif
endif
pahole-flags-$(CONFIG_PAHOLE_HAS_LANG_EXCLUDE) += --lang_exclude=rust
export PAHOLE_FLAGS := $(pahole-flags-y)
export MODULE_PAHOLE_FLAGS := $(module-pahole-flags-y)
......@@ -41,7 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@
if [ ! -f vmlinux ]; then \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
else \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) $(MODULE_PAHOLE_FLAGS) --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
fi;
......
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
usdt.o zip.o elf.o features.o btf_relocate.o
usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
......@@ -5093,168 +5093,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt
return btf__parse_split(path, vmlinux_btf);
}
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind)
{
it->p = NULL;
it->m_idx = -1;
it->off_idx = 0;
it->vlen = 0;
switch (iter_kind) {
case BTF_FIELD_ITER_IDS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
break;
case BTF_KIND_ARRAY:
it->desc = (struct btf_field_desc) {
2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, type)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, type)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, type)}
};
break;
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_var_secinfo),
1, {offsetof(struct btf_var_secinfo, type)}
};
break;
default:
return -EINVAL;
}
break;
case BTF_FIELD_ITER_STRS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
case BTF_KIND_ARRAY:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)}
};
break;
case BTF_KIND_ENUM:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum),
1, {offsetof(struct btf_enum, name_off)}
};
break;
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum64),
1, {offsetof(struct btf_enum64, name_off)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, name_off)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, name_off)}
};
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
if (it->desc.m_sz)
it->vlen = btf_vlen(t);
it->p = t;
return 0;
}
__u32 *btf_field_iter_next(struct btf_field_iter *it)
{
if (!it->p)
return NULL;
if (it->m_idx < 0) {
if (it->off_idx < it->desc.t_off_cnt)
return it->p + it->desc.t_offs[it->off_idx++];
/* move to per-member iteration */
it->m_idx = 0;
it->p += sizeof(struct btf_type);
it->off_idx = 0;
}
/* if type doesn't have members, stop */
if (it->desc.m_sz == 0) {
it->p = NULL;
return NULL;
}
if (it->off_idx >= it->desc.m_off_cnt) {
/* exhausted this member's fields, go to the next member */
it->m_idx++;
it->p += it->desc.m_sz;
it->off_idx = 0;
}
if (it->m_idx < it->vlen)
return it->p + it->desc.m_offs[it->off_idx++];
it->p = NULL;
return NULL;
}
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
......
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
#else
#include "btf.h"
#include "libbpf_internal.h"
#endif
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
enum btf_field_iter_kind iter_kind)
{
it->p = NULL;
it->m_idx = -1;
it->off_idx = 0;
it->vlen = 0;
switch (iter_kind) {
case BTF_FIELD_ITER_IDS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
break;
case BTF_KIND_ARRAY:
it->desc = (struct btf_field_desc) {
2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, type)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, type)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, type)}
};
break;
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_var_secinfo),
1, {offsetof(struct btf_var_secinfo, type)}
};
break;
default:
return -EINVAL;
}
break;
case BTF_FIELD_ITER_STRS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
case BTF_KIND_ARRAY:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)}
};
break;
case BTF_KIND_ENUM:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum),
1, {offsetof(struct btf_enum, name_off)}
};
break;
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum64),
1, {offsetof(struct btf_enum64, name_off)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, name_off)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, name_off)}
};
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
if (it->desc.m_sz)
it->vlen = btf_vlen(t);
it->p = t;
return 0;
}
__u32 *btf_field_iter_next(struct btf_field_iter *it)
{
if (!it->p)
return NULL;
if (it->m_idx < 0) {
if (it->off_idx < it->desc.t_off_cnt)
return it->p + it->desc.t_offs[it->off_idx++];
/* move to per-member iteration */
it->m_idx = 0;
it->p += sizeof(struct btf_type);
it->off_idx = 0;
}
/* if type doesn't have members, stop */
if (it->desc.m_sz == 0) {
it->p = NULL;
return NULL;
}
if (it->off_idx >= it->desc.m_off_cnt) {
/* exhausted this member's fields, go to the next member */
it->m_idx++;
it->p += it->desc.m_sz;
it->off_idx = 0;
}
if (it->m_idx < it->vlen)
return it->p + it->desc.m_offs[it->off_idx++];
it->p = NULL;
return NULL;
}
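
The BTF_FIELD_ITER_STRS mode follows the same pattern for string offsets. A
minimal sketch, where str_map is a hypothetical table mapping distilled-base
string offsets to base BTF string offsets (mirroring the str_map used by the
relocation code further below):

static int remap_type_strs(struct btf_type *t, const __u32 *str_map)
{
	struct btf_field_iter it;
	__u32 *str_off;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
	if (err)
		return err;

	/* visits t->name_off plus member/param/enumerator name_off fields */
	while ((str_off = btf_field_iter_next(&it)))
		*str_off = str_map[*str_off];

	return 0;
}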
......@@ -5,11 +5,34 @@
#define _GNU_SOURCE
#endif
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#define btf_type_by_id (struct btf_type *)btf_type_by_id
#define btf__type_cnt btf_nr_types
#define btf__base_btf btf_base_btf
#define btf__name_by_offset btf_name_by_offset
#define btf__str_by_offset btf_str_by_offset
#define btf_kflag btf_type_kflag
#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
#define free(ptr) kvfree(ptr)
#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
#else
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#endif /* __KERNEL__ */
struct btf;
struct btf_relocate {
......@@ -160,7 +183,7 @@ static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
*/
static int btf_relocate_map_distilled_base(struct btf_relocate *r)
{
struct btf_name_info *dist_base_info_sorted, *dist_base_info_sorted_end;
struct btf_name_info *info, *info_end;
struct btf_type *base_t, *dist_t;
__u8 *base_name_cnt = NULL;
int err = 0;
......@@ -169,26 +192,24 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
/* generate a sort index array of name/type ids sorted by name for
* distilled base BTF to speed name-based lookups.
*/
dist_base_info_sorted = calloc(r->nr_dist_base_types, sizeof(*dist_base_info_sorted));
if (!dist_base_info_sorted) {
info = calloc(r->nr_dist_base_types, sizeof(*info));
if (!info) {
err = -ENOMEM;
goto done;
}
dist_base_info_sorted_end = dist_base_info_sorted + r->nr_dist_base_types;
info_end = info + r->nr_dist_base_types;
for (id = 0; id < r->nr_dist_base_types; id++) {
dist_t = btf_type_by_id(r->dist_base_btf, id);
dist_base_info_sorted[id].name = btf__name_by_offset(r->dist_base_btf,
dist_t->name_off);
dist_base_info_sorted[id].id = id;
dist_base_info_sorted[id].size = dist_t->size;
dist_base_info_sorted[id].needs_size = true;
info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
info[id].id = id;
info[id].size = dist_t->size;
info[id].needs_size = true;
}
qsort(dist_base_info_sorted, r->nr_dist_base_types, sizeof(*dist_base_info_sorted),
cmp_btf_name_size);
qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);
/* Mark distilled base struct/union members of split BTF structs/unions
* in id_map with BTF_IS_EMBEDDED; this signals that these types
* need to match both name and size, otherwise embeddding the base
* need to match both name and size, otherwise embedding the base
* struct/union in the split type is invalid.
*/
for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
......@@ -216,8 +237,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
/* Now search base BTF for matching distilled base BTF types. */
for (id = 1; id < r->nr_base_types; id++) {
struct btf_name_info *dist_name_info, *dist_name_info_next = NULL;
struct btf_name_info base_name_info = {};
struct btf_name_info *dist_info, base_info = {};
int dist_kind, base_kind;
base_t = btf_type_by_id(r->base_btf, id);
......@@ -225,16 +245,16 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
if (!base_t->name_off)
continue;
base_kind = btf_kind(base_t);
base_name_info.id = id;
base_name_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
base_info.id = id;
base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
switch (base_kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
/* These types should match both name and size */
base_name_info.needs_size = true;
base_name_info.size = base_t->size;
base_info.needs_size = true;
base_info.size = base_t->size;
break;
case BTF_KIND_FWD:
/* No size considerations for fwds. */
......@@ -248,31 +268,24 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
* unless corresponding _base_ types to match them are
* missing.
*/
base_name_info.needs_size = base_name_cnt[base_t->name_off] > 1;
base_name_info.size = base_t->size;
base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
base_info.size = base_t->size;
break;
default:
continue;
}
/* iterate over all matching distilled base types */
for (dist_name_info = search_btf_name_size(&base_name_info, dist_base_info_sorted,
r->nr_dist_base_types);
dist_name_info != NULL; dist_name_info = dist_name_info_next) {
/* Are there more distilled matches to process after
* this one?
*/
dist_name_info_next = dist_name_info + 1;
if (dist_name_info_next >= dist_base_info_sorted_end ||
cmp_btf_name_size(&base_name_info, dist_name_info_next))
dist_name_info_next = NULL;
if (!dist_name_info->id || dist_name_info->id > r->nr_dist_base_types) {
for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
dist_info != NULL && dist_info < info_end &&
cmp_btf_name_size(&base_info, dist_info) == 0;
dist_info++) {
if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
id, dist_name_info->id);
id, dist_info->id);
err = -EINVAL;
goto done;
}
dist_t = btf_type_by_id(r->dist_base_btf, dist_name_info->id);
dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
dist_kind = btf_kind(dist_t);
/* Validate that the found distilled type is compatible.
......@@ -319,15 +332,15 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
/* size verification is required for embedded
* struct/unions.
*/
if (r->id_map[dist_name_info->id] == BTF_IS_EMBEDDED &&
if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
base_t->size != dist_t->size)
continue;
break;
default:
continue;
}
if (r->id_map[dist_name_info->id] &&
r->id_map[dist_name_info->id] != BTF_IS_EMBEDDED) {
if (r->id_map[dist_info->id] &&
r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
/* we already have a match; this tells us that
* multiple base types of the same name
* have the same size, since for cases where
......@@ -337,13 +350,13 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
* to in base BTF, so error out.
*/
pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
base_name_info.name, dist_name_info->id,
base_t->size, id, r->id_map[dist_name_info->id]);
base_info.name, dist_info->id,
base_t->size, id, r->id_map[dist_info->id]);
err = -EINVAL;
goto done;
}
/* map id and name */
r->id_map[dist_name_info->id] = id;
r->id_map[dist_info->id] = id;
r->str_map[dist_t->name_off] = base_t->name_off;
}
}
......@@ -362,7 +375,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
}
done:
free(base_name_cnt);
free(dist_base_info_sorted);
free(info);
return err;
}
......
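
The loop restructuring above boils down to a common pattern: find the first
entry in a name-sorted array that matches a key, then walk forward while
entries still compare equal. A generic sketch of that pattern (the helper
names for_each_match and visit are hypothetical):

/* Visit the run of entries equal to @key, starting at @first_match. */
static void for_each_match(struct btf_name_info *first_match,
			   struct btf_name_info *end,
			   const struct btf_name_info *key,
			   int (*cmp)(const void *, const void *),
			   void (*visit)(struct btf_name_info *))
{
	struct btf_name_info *cur;

	for (cur = first_match; cur && cur < end && cmp(key, cur) == 0; cur++)
		visit(cur);
}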
......@@ -159,6 +159,37 @@ __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
{
}
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
struct bpf_testmod_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx) {
*err = -ENOMEM;
return NULL;
}
refcount_set(&ctx->usage, 1);
return ctx;
}
static void testmod_free_cb(struct rcu_head *head)
{
struct bpf_testmod_ctx *ctx;
ctx = container_of(head, struct bpf_testmod_ctx, rcu);
kfree(ctx);
}
__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
if (!ctx)
return;
if (refcount_dec_and_test(&ctx->usage))
call_rcu(&ctx->rcu, testmod_free_cb);
}
struct bpf_testmod_btf_type_tag_1 {
int a;
};
......@@ -369,8 +400,14 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_common_kfunc_ids,
......@@ -904,6 +941,12 @@ extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
{
const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
{
.btf_id = bpf_testmod_dtor_ids[0],
.kfunc_btf_id = bpf_testmod_dtor_ids[1]
},
};
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
......@@ -912,6 +955,9 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
ARRAY_SIZE(bpf_testmod_dtors),
THIS_MODULE);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
......
......@@ -80,6 +80,11 @@ struct sendmsg_args {
int msglen;
};
struct bpf_testmod_ctx {
struct callback_head rcu;
refcount_t usage;
};
struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
......@@ -135,4 +140,8 @@ int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym;
int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym;
void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym;
struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym;
void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym;
#endif /* _BPF_TESTMOD_KFUNC_H */
......@@ -78,6 +78,7 @@ static struct kfunc_test_params kfunc_tests[] = {
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
TC_TEST(kfunc_call_test_static_unused_arg, 0),
TC_TEST(kfunc_call_ctx, 0),
};
struct syscall_test_args {
......
......@@ -177,4 +177,41 @@ int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
return actual != expected ? -1 : 0;
}
struct ctx_val {
struct bpf_testmod_ctx __kptr *ctx;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct ctx_val);
} ctx_map SEC(".maps");
SEC("tc")
int kfunc_call_ctx(struct __sk_buff *skb)
{
struct bpf_testmod_ctx *ctx;
int err = 0;
ctx = bpf_testmod_ctx_create(&err);
if (!ctx && !err)
err = -1;
if (ctx) {
int key = 0;
struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
/* Transfer ctx to map to be freed via implicit dtor call
* on cleanup.
*/
if (ctx_val)
ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
if (ctx) {
bpf_testmod_ctx_release(ctx);
err = -1;
}
}
return err;
}
char _license[] SEC("license") = "GPL";