Commit 1373ff59 authored by Shuyi Cheng, committed by Andrii Nakryiko

libbpf: Introduce 'btf_custom_path' to 'bpf_obj_open_opts'

btf_custom_path allows developers to load custom BTF which libbpf will
subsequently use for CO-RE relocation instead of vmlinux BTF.

With btf_custom_path in bpf_object_open_opts, one can directly use the
skeleton's <objname>_bpf__open_opts() API to pass in the btf_custom_path
parameter, instead of relying on bpf_object__load_xattr(), which is slated
to be deprecated ([0]).
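
For example, with a skeleton generated from a hypothetical myprog.bpf.c
(the skeleton name and BTF path below are illustrative only, not part of
this patch), the option can be passed at open time roughly like this:

#include <bpf/libbpf.h>
#include "myprog.skel.h"

int main(void)
{
	/* Use the given BTF for CO-RE relocations instead of vmlinux BTF
	 * (path is an example; typically BTF dumped from the target kernel).
	 */
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.btf_custom_path = "/tmp/target-vmlinux.btf",
	);
	struct myprog_bpf *skel;

	skel = myprog_bpf__open_opts(&opts);
	if (!skel)
		return 1;

	if (myprog_bpf__load(skel)) {
		myprog_bpf__destroy(skel);
		return 1;
	}

	/* ... attach and use programs as usual ... */

	myprog_bpf__destroy(skel);
	return 0;
}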

This continues earlier work started by another developer ([1]).

  [0] https://lore.kernel.org/bpf/CAEf4BzbJZLjNoiK8_VfeVg_Vrg=9iYFv+po-38SMe=UzwDKJ=Q@mail.gmail.com/#t
  [1] https://yhbt.net/lore/all/CAEf4Bzbgw49w2PtowsrzKQNcxD4fZRE6AKByX-5-dMo-+oWHHA@mail.gmail.com/

Signed-off-by: Shuyi Cheng <chengshuyi@linux.alibaba.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/1626180159-112996-2-git-send-email-chengshuyi@linux.alibaba.com
parent 88865347
@@ -498,6 +498,10 @@ struct bpf_object {
 	 * it at load time.
 	 */
 	struct btf *btf_vmlinux;
+	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
+	 * override for vmlinux BTF.
+	 */
+	char *btf_custom_path;
 	/* vmlinux BTF override for CO-RE relocations */
 	struct btf *btf_vmlinux_override;
 	/* Lazily initialized kernel module BTFs */
@@ -2645,8 +2649,10 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
 	struct bpf_program *prog;
 	int i;

-	/* CO-RE relocations need kernel BTF */
-	if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
+	/* CO-RE relocations need kernel BTF, only when btf_custom_path
+	 * is not specified
+	 */
+	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
 		return true;

 	/* Support for typed ksyms needs kernel BTF */
@@ -7596,7 +7602,7 @@ static struct bpf_object *
 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 		   const struct bpf_object_open_opts *opts)
 {
-	const char *obj_name, *kconfig;
+	const char *obj_name, *kconfig, *btf_tmp_path;
 	struct bpf_program *prog;
 	struct bpf_object *obj;
 	char tmp_name[64];
@@ -7627,6 +7633,19 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 	if (IS_ERR(obj))
 		return obj;

+	btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
+	if (btf_tmp_path) {
+		if (strlen(btf_tmp_path) >= PATH_MAX) {
+			err = -ENAMETOOLONG;
+			goto out;
+		}
+		obj->btf_custom_path = strdup(btf_tmp_path);
+		if (!obj->btf_custom_path) {
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+
 	kconfig = OPTS_GET(opts, kconfig, NULL);
 	if (kconfig) {
 		obj->kconfig = strdup(kconfig);
@@ -8097,7 +8116,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	err = err ? : bpf_object__sanitize_maps(obj);
 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
 	err = err ? : bpf_object__create_maps(obj);
-	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
+	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path);
 	err = err ? : bpf_object__load_progs(obj, attr->log_level);

 	if (obj->gen_loader) {
@@ -8744,6 +8763,7 @@ void bpf_object__close(struct bpf_object *obj)
 	for (i = 0; i < obj->nr_maps; i++)
 		bpf_map__destroy(&obj->maps[i]);

+	zfree(&obj->btf_custom_path);
 	zfree(&obj->kconfig);
 	zfree(&obj->externs);
 	obj->nr_extern = 0;
...
@@ -94,8 +94,15 @@ struct bpf_object_open_opts {
 	 * system Kconfig for CONFIG_xxx externs.
 	 */
 	const char *kconfig;
+	/* Path to the custom BTF to be used for BPF CO-RE relocations.
+	 * This custom BTF completely replaces the use of vmlinux BTF
+	 * for the purpose of CO-RE relocations.
+	 * NOTE: any other BPF feature (e.g., fentry/fexit programs,
+	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
+	 */
+	const char *btf_custom_path;
 };
-#define bpf_object_open_opts__last_field kconfig
+#define bpf_object_open_opts__last_field btf_custom_path

 LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
 LIBBPF_API struct bpf_object *
...
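
For reference, the same option also flows through the plain bpf_object API;
a minimal sketch (object file name and BTF path are illustrative, not part
of this patch):

#include <bpf/libbpf.h>

int load_with_custom_btf(void)
{
	/* Use custom BTF for CO-RE relocations instead of /sys/kernel/btf/vmlinux */
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.btf_custom_path = "/tmp/target-vmlinux.btf",
	);
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("myprog.bpf.o", &opts);
	if (libbpf_get_error(obj))
		return -1;

	err = bpf_object__load(obj);
	bpf_object__close(obj);
	return err;
}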