Commit d564ffde authored by Andrii Nakryiko's avatar Andrii Nakryiko

Merge branch 'bpf-allow-invoking-kfuncs-from-bpf_prog_type_syscall-progs'

David Vernet says:

====================
bpf: Allow invoking kfuncs from BPF_PROG_TYPE_SYSCALL progs

Currently, a set of core BPF kfuncs (e.g. bpf_task_*, bpf_cgroup_*,
bpf_cpumask_*, etc) cannot be invoked from BPF_PROG_TYPE_SYSCALL
programs. The whitelist approach taken for enabling kfuncs makes sense:
it is not safe to call these kfuncs from every program type. For example,
it may not be safe to call bpf_task_acquire() in an fentry to
free_task().

BPF_PROG_TYPE_SYSCALL, on the other hand, is a perfectly safe program
type from which to invoke these kfuncs, as it's a very controlled
environment, and we should never be able to run into any of the typical
problems such as recursive invocations, acquiring references on freeing
kptrs, etc. Being able to invoke these kfuncs would be useful, as
BPF_PROG_TYPE_SYSCALL can be invoked with BPF_PROG_RUN, and would
therefore enable user space programs to synchronously call into BPF to
manipulate these kptrs.
---

v1: https://lore.kernel.org/all/20240404010308.334604-1-void@manifault.com/
v1 -> v2:

- Create new verifier_kfunc_prog_types testcase meant to specifically
  validate calling core kfuncs from various program types. Remove the
  macros and testcases that had been added to the task, cgrp, and
  cpumask kfunc testcases (Andrii and Yonghong)
====================

Link: https://lore.kernel.org/r/20240405143041.632519-1-void@manifault.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 00d5d22a 1bc724af
......@@ -474,6 +474,7 @@ static int __init cpumask_kfunc_init(void)
ret = bpf_mem_alloc_init(&bpf_cpumask_ma, sizeof(struct bpf_cpumask), false);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &cpumask_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
ARRAY_SIZE(cpumask_dtors),
THIS_MODULE);
......
......@@ -2653,6 +2653,7 @@ static int __init kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
ARRAY_SIZE(generic_dtors),
THIS_MODULE);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "verifier_kfunc_prog_types.skel.h"
/* Load every prog in the verifier_kfunc_prog_types skeleton and check each
 * one's embedded __success/__failure/__msg expectation, validating which
 * program types may (or may not) call the core task/cgroup/cpumask kfuncs.
 */
void test_verifier_kfunc_prog_types(void)
{
RUN_TESTS(verifier_kfunc_prog_types);
}
......@@ -13,7 +13,7 @@ struct __cgrps_kfunc_map_value {
struct cgroup __kptr * cgrp;
};
struct hash_map {
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct __cgrps_kfunc_map_value);
......
......@@ -13,7 +13,7 @@ struct __tasks_kfunc_map_value {
struct task_struct __kptr * task;
};
struct hash_map {
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct __tasks_kfunc_map_value);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "cgrp_kfunc_common.h"
#include "cpumask_common.h"
#include "task_kfunc_common.h"
char _license[] SEC("license") = "GPL";
/***************
* Task kfuncs *
***************/
/* Exercise the core task kfuncs: resolve the current task, look it up again
 * by PID (which returns a referenced pointer), take one extra reference, and
 * drop every reference acquired. The kfunc call sequence is what the test
 * progs below rely on to probe per-program-type kfunc availability.
 */
static void task_kfunc_load_test(void)
{
	struct task_struct *me, *looked_up, *extra;

	me = bpf_get_current_task_btf();
	looked_up = bpf_task_from_pid(me->pid);
	if (!looked_up)
		return;

	extra = bpf_task_acquire(looked_up);
	if (extra)
		bpf_task_release(extra);

	bpf_task_release(looked_up);
}
/* Task kfuncs are not registered for raw tracepoint progs, so this prog
 * must be rejected at load time with the "calling kernel function" error.
 */
SEC("raw_tp")
__failure __msg("calling kernel function")
int BPF_PROG(task_kfunc_raw_tp)
{
task_kfunc_load_test();
return 0;
}
/* Task kfuncs are registered for BPF_PROG_TYPE_SYSCALL, so this prog
 * must load and verify successfully.
 */
SEC("syscall")
__success
int BPF_PROG(task_kfunc_syscall)
{
task_kfunc_load_test();
return 0;
}
/*****************
* cgroup kfuncs *
*****************/
/* Exercise the core cgroup kfuncs: look up a cgroup by ID (returns a
 * referenced pointer or NULL), take one additional reference, and drop
 * every reference on all exit paths so the verifier sees balanced
 * acquire/release pairs.
 */
static void cgrp_kfunc_load_test(void)
{
	struct cgroup *looked_up, *extra;

	looked_up = bpf_cgroup_from_id(0);
	if (!looked_up)
		return;

	extra = bpf_cgroup_acquire(looked_up);
	if (!extra) {
		bpf_cgroup_release(looked_up);
		return;
	}

	bpf_cgroup_release(extra);
	bpf_cgroup_release(looked_up);
}
/* Cgroup kfuncs are not registered for raw tracepoint progs, so this prog
 * must be rejected at load time with the "calling kernel function" error.
 */
SEC("raw_tp")
__failure __msg("calling kernel function")
int BPF_PROG(cgrp_kfunc_raw_tp)
{
cgrp_kfunc_load_test();
return 0;
}
/* Cgroup kfuncs are registered for BPF_PROG_TYPE_SYSCALL, so this prog
 * must load and verify successfully.
 */
SEC("syscall")
__success
int BPF_PROG(cgrp_kfunc_syscall)
{
cgrp_kfunc_load_test();
return 0;
}
/******************
* cpumask kfuncs *
******************/
/* Exercise the core cpumask kfuncs: allocate a bpf_cpumask, take an extra
 * reference on it, mutate and query it through both pointers, then drop
 * both references. The exact kfunc call sequence is preserved so the
 * verifier's reference tracking sees the same program.
 */
static void cpumask_kfunc_load_test(void)
{
	struct bpf_cpumask *mask, *extra;

	mask = bpf_cpumask_create();
	if (!mask)
		return;

	extra = bpf_cpumask_acquire(mask);
	bpf_cpumask_set_cpu(0, mask);
	bpf_cpumask_test_cpu(0, (const struct cpumask *)extra);

	bpf_cpumask_release(extra);
	bpf_cpumask_release(mask);
}
/* Cpumask kfuncs are not registered for raw tracepoint progs, so this prog
 * must be rejected at load time with the "calling kernel function" error.
 */
SEC("raw_tp")
__failure __msg("calling kernel function")
int BPF_PROG(cpumask_kfunc_raw_tp)
{
cpumask_kfunc_load_test();
return 0;
}
/* Cpumask kfuncs are registered for BPF_PROG_TYPE_SYSCALL, so this prog
 * must load and verify successfully.
 */
SEC("syscall")
__success
int BPF_PROG(cpumask_kfunc_syscall)
{
cpumask_kfunc_load_test();
return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment