Commit 6cae5a71 authored by Andrii Nakryiko's avatar Andrii Nakryiko

Merge branch 'bpf: Add detection of kfuncs.'

Alexei Starovoitov says:

====================

From: Alexei Starovoitov <ast@kernel.org>

Allow BPF programs to detect at load time whether a particular kfunc exists.

Patch 1: Allow ld_imm64 to point to kfunc in the kernel.
Patch 2: Fix relocation of kfunc in ld_imm64 insn when kfunc is in kernel module.
Patch 3: Introduce bpf_ksym_exists() macro.
Patch 4: selftest.

NOTE: detection of kfuncs from light skeleton is not supported yet.
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 0f10f647 95fdf6e3
......@@ -15952,8 +15952,8 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
goto err_put;
}
if (!btf_type_is_var(t)) {
verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
err = -EINVAL;
goto err_put;
}
......@@ -15966,6 +15966,14 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
err = -ENOENT;
goto err_put;
}
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
if (btf_type_is_func(t)) {
aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
aux->btf_var.mem_size = 0;
goto check_btf;
}
datasec_id = find_btf_percpu_datasec(btf);
if (datasec_id > 0) {
......@@ -15978,9 +15986,6 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
}
}
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
type = t->type;
t = btf_type_skip_modifiers(btf, type, NULL);
if (percpu) {
......@@ -16008,7 +16013,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
aux->btf_var.btf = btf;
aux->btf_var.btf_id = type;
}
check_btf:
/* check whether we recorded this BTF (and maybe module) already */
for (i = 0; i < env->used_btf_cnt; i++) {
if (env->used_btfs[i].btf == btf) {
......
......@@ -177,6 +177,11 @@ enum libbpf_tristate {
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
/*
 * bpf_ksym_exists(sym) - load-time check whether a kernel symbol declared
 * with __ksym __weak (e.g. a kfunc) actually resolved to a real address.
 *
 * Expands to a truth value: non-zero when the symbol is present, zero when
 * it is absent. The _Static_assert fires at compile time when the symbol's
 * address is a compile-time constant, which happens if the declaration is
 * missing __weak — a non-weak ksym is assumed to always exist, so testing
 * it for existence would be meaningless.
 */
#define bpf_ksym_exists(sym) ({ \
	_Static_assert(!__builtin_constant_p(!!(sym)), #sym " should be marked as __weak"); \
	!!(sym); \
})
#ifndef ___bpf_concat
/* Token-paste helper: ___bpf_concat(a, b) expands to the single token ab. */
#define ___bpf_concat(a, b) a ## b
#endif
......
......@@ -7533,6 +7533,12 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
ext->is_set = true;
ext->ksym.kernel_btf_id = kfunc_id;
ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
/* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
* populates FD into ld_imm64 insn when it's used to point to kfunc.
* {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
* {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
*/
ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
ext->name, kfunc_id);
......
......@@ -17,6 +17,10 @@ int err, pid;
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
static bool is_test_kfunc_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
......@@ -26,7 +30,21 @@ static bool is_test_kfunc_task(void)
static int test_acquire_release(struct task_struct *task)
{
struct task_struct *acquired;
struct task_struct *acquired = NULL;
if (!bpf_ksym_exists(bpf_task_acquire)) {
err = 3;
return 0;
}
if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
err = 4;
return 0;
}
if (bpf_ksym_exists(invalid_kfunc)) {
/* the verifier's dead code elimination should remove this */
err = 5;
asm volatile ("goto -1"); /* for (;;); */
}
acquired = bpf_task_acquire(task);
bpf_task_release(acquired);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment