Commit 8a2162a9 authored by Alexei Starovoitov

Merge branch 'Support storing struct cgroup * objects as kptrs'

David Vernet says:

====================

In [0], we added support for storing struct task_struct * objects as
kptrs. This patch set extends this effort to also include storing struct
cgroup * objects as kptrs.

As with tasks, there are many possible use cases for storing cgroups in
maps. During tracing, for example, it could be useful to query cgroup
statistics such as PSI averages, or to track which tasks are migrating
to and from the cgroup.
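
As an illustration of the pattern these kfuncs enable, here is a
minimal sketch (not part of this series; the map, struct, and program
names are hypothetical) of a tracepoint program that acquires a cgroup
and stashes it in an array map as a referenced kptr, using the same
__kptr_ref annotation as the selftests below:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

struct cgrp_slot {
	struct cgroup __kptr_ref *cgrp;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, struct cgrp_slot);
	__uint(max_entries, 1);
} cgrp_map SEC(".maps");

char _license[] SEC("license") = "GPL";

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(stash_cgrp, struct cgroup *cgrp, const char *path)
{
	u32 key = 0;
	struct cgroup *acquired, *old;
	struct cgrp_slot *slot;

	slot = bpf_map_lookup_elem(&cgrp_map, &key);
	if (!slot)
		return 0;

	/* Take a reference on the cgroup and hand it to the map. */
	acquired = bpf_cgroup_acquire(cgrp);
	old = bpf_kptr_xchg(&slot->cgrp, acquired);

	/* Release whatever reference the slot previously held, if any. */
	if (old)
		bpf_cgroup_release(old);
	return 0;
}

A later program can take the stored cgroup back out with
bpf_kptr_xchg() or bpf_cgroup_kptr_get(), query it, and release it
when done.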

[0]: https://lore.kernel.org/all/20221120051004.3605026-1-void@manifault.com/
====================
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents dc79f035 227a89cf
@@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
@@ -1879,6 +1880,85 @@ void bpf_task_release(struct task_struct *p)
put_task_struct_rcu_user(p);
}
#ifdef CONFIG_CGROUPS
/**
* bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
* this kfunc which is not stored in a map as a kptr must be released by
* calling bpf_cgroup_release().
* @cgrp: The cgroup on which a reference is being acquired.
*/
struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
cgroup_get(cgrp);
return cgrp;
}
/**
* bpf_cgroup_kptr_get - Acquire a reference on a struct cgroup kptr. A cgroup
* kptr acquired by this kfunc which is not subsequently stored in a map must
* be released by calling bpf_cgroup_release().
* @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
*/
struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
{
struct cgroup *cgrp;
rcu_read_lock();
/* Another context could remove the cgroup from the map and release it
* at any time, including after we've done the lookup above. This is
* safe because we're in an RCU read region, so the cgroup is
* guaranteed to remain valid until at least the rcu_read_unlock()
* below.
*/
cgrp = READ_ONCE(*cgrpp);
if (cgrp && !cgroup_tryget(cgrp))
/* If the cgroup had been removed from the map and freed as
* described above, cgroup_tryget() will return false. The
* cgroup will be freed at some point after the current RCU gp
* has ended, so just return NULL to the user.
*/
cgrp = NULL;
rcu_read_unlock();
return cgrp;
}
/**
* bpf_cgroup_release - Release the reference acquired on a struct cgroup *.
* If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
* not be freed until the current grace period has ended, even if its refcount
* drops to 0.
* @cgrp: The cgroup on which a reference is being released.
*/
void bpf_cgroup_release(struct cgroup *cgrp)
{
if (!cgrp)
return;
cgroup_put(cgrp);
}
/**
* bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
* array. A cgroup returned by this kfunc which is not subsequently stored in a
* map must be released by calling bpf_cgroup_release().
* @cgrp: The cgroup for which we're performing a lookup.
* @level: The level of ancestor to look up.
*/
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
struct cgroup *ancestor;
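/* The ancestors[] array contains the cgroup itself at index cgrp->level,
* so any level in [0, cgrp->level] is a valid lookup.
*/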
if (level > cgrp->level || level < 0)
return NULL;
ancestor = cgrp->ancestors[level];
cgroup_get(ancestor);
return ancestor;
}
#endif /* CONFIG_CGROUPS */
void *bpf_cast_to_kern_ctx(void *obj)
{
return obj;
@@ -1904,6 +1984,13 @@ BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
#endif
BTF_SET8_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
@@ -1915,6 +2002,10 @@ static const struct btf_kfunc_id_set generic_kfunc_set = {
BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release)
#endif
BTF_SET8_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
@@ -1928,12 +2019,18 @@ static const struct btf_kfunc_id_set common_kfunc_set = {
static int __init kfunc_init(void)
{
int ret, idx = 0;
const struct btf_id_dtor_kfunc generic_dtors[] = {
{
.btf_id = generic_dtor_ids[idx++],
.kfunc_btf_id = generic_dtor_ids[idx++]
},
#ifdef CONFIG_CGROUPS
{
.btf_id = generic_dtor_ids[idx++],
.kfunc_btf_id = generic_dtor_ids[idx++]
},
#endif
};
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
@@ -10,6 +10,7 @@ bpf_nf # JIT does not support calling kernel f
bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
cb_refs # expected error message unexpected error: -524 (trampoline)
cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc)
cgrp_kfunc # JIT does not support calling kernel function
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
core_read_macros # unknown func bpf_probe_read#4 (overlapping)
d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <cgroup_helpers.h>
#include <test_progs.h>
#include "cgrp_kfunc_failure.skel.h"
#include "cgrp_kfunc_success.skel.h"
static size_t log_buf_sz = 1 << 20; /* 1 MB */
static char obj_log_buf[1048576];
static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
{
struct cgrp_kfunc_success *skel;
int err;
skel = cgrp_kfunc_success__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return NULL;
skel->bss->pid = getpid();
err = cgrp_kfunc_success__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
return skel;
cleanup:
cgrp_kfunc_success__destroy(skel);
return NULL;
}
static int mkdir_rm_test_dir(void)
{
int fd;
const char *cgrp_path = "cgrp_kfunc";
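/* Creating this directory fires the cgroup_mkdir tracepoint that the
* test programs attach to; it is then removed again to clean up.
*/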
fd = create_and_get_cgroup(cgrp_path);
if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd"))
return -1;
close(fd);
remove_cgroup(cgrp_path);
return 0;
}
static void run_success_test(const char *prog_name)
{
struct cgrp_kfunc_success *skel;
struct bpf_program *prog;
struct bpf_link *link = NULL;
skel = open_load_cgrp_kfunc_skel();
if (!ASSERT_OK_PTR(skel, "open_load_skel"))
return;
if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "attached_link"))
goto cleanup;
ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count");
if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir"))
goto cleanup;
ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count");
ASSERT_OK(skel->bss->err, "post_rmdir_err");
cleanup:
bpf_link__destroy(link);
cgrp_kfunc_success__destroy(skel);
}
static const char * const success_tests[] = {
"test_cgrp_acquire_release_argument",
"test_cgrp_acquire_leave_in_map",
"test_cgrp_xchg_release",
"test_cgrp_get_release",
"test_cgrp_get_ancestors",
};
static struct {
const char *prog_name;
const char *expected_err_msg;
} failure_tests[] = {
{"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
{"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"},
{"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
{"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
{"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"},
{"cgrp_kfunc_acquire_unreleased", "Unreleased reference"},
{"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
{"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
{"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"},
{"cgrp_kfunc_xchg_unreleased", "Unreleased reference"},
{"cgrp_kfunc_get_unreleased", "Unreleased reference"},
{"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
{"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"},
{"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
{"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"},
};
static void verify_fail(const char *prog_name, const char *expected_err_msg)
{
LIBBPF_OPTS(bpf_object_open_opts, opts);
struct cgrp_kfunc_failure *skel;
int err, i;
opts.kernel_log_buf = obj_log_buf;
opts.kernel_log_size = log_buf_sz;
opts.kernel_log_level = 1;
skel = cgrp_kfunc_failure__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
struct bpf_program *prog;
const char *curr_name = failure_tests[i].prog_name;
prog = bpf_object__find_program_by_name(skel->obj, curr_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
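/* Autoload only the program under test so each failure case is
* verified in isolation.
*/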
bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
}
err = cgrp_kfunc_failure__load(skel);
if (!ASSERT_ERR(err, "unexpected load success"))
goto cleanup;
if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
}
cleanup:
cgrp_kfunc_failure__destroy(skel);
}
void test_cgrp_kfunc(void)
{
int i, err;
err = setup_cgroup_environment();
if (!ASSERT_OK(err, "cgrp_env_setup"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i]))
continue;
run_success_test(success_tests[i]);
}
for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
if (!test__start_subtest(failure_tests[i].prog_name))
continue;
verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
}
cleanup:
cleanup_cgroup_environment();
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#ifndef _CGRP_KFUNC_COMMON_H
#define _CGRP_KFUNC_COMMON_H
#include <errno.h>
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct __cgrps_kfunc_map_value {
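/* __kptr_ref marks a field holding a referenced kernel pointer, which
* the map owns until it is exchanged out with bpf_kptr_xchg().
*/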
struct cgroup __kptr_ref *cgrp;
};
struct hash_map {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct __cgrps_kfunc_map_value);
__uint(max_entries, 1);
} __cgrps_kfunc_map SEC(".maps");
struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym;
void bpf_cgroup_release(struct cgroup *p) __ksym;
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
{
s32 id;
long status;
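/* The cgroup's self.id serves as the key for the entry tracking it. */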
status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
if (status)
return NULL;
return bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
}
static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
{
struct __cgrps_kfunc_map_value local, *v;
long status;
struct cgroup *acquired, *old;
s32 id;
status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
if (status)
return status;
local.cgrp = NULL;
status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
if (status)
return status;
v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
if (!v) {
bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
return -ENOENT;
}
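/* Acquire a new reference and transfer ownership of it to the map. */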
acquired = bpf_cgroup_acquire(cgrp);
old = bpf_kptr_xchg(&v->cgrp, acquired);
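/* The entry was created empty above, so no kptr should have been
* present; if one somehow was, release it and report -EEXIST.
*/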
if (old) {
bpf_cgroup_release(old);
return -EEXIST;
}
return 0;
}
#endif /* _CGRP_KFUNC_COMMON_H */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "cgrp_kfunc_common.h"
char _license[] SEC("license") = "GPL";
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(cgroup_mkdir,
* TP_PROTO(struct cgroup *cgrp, const char *path),
* TP_ARGS(cgrp, path)
*/
static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
{
int status;
status = cgrps_kfunc_map_insert(cgrp);
if (status)
return NULL;
return cgrps_kfunc_map_value_lookup(cgrp);
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
/* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
acquired = bpf_cgroup_acquire(v->cgrp);
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
/* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
bpf_cgroup_release(acquired);
return 0;
}
SEC("kretprobe/cgroup_destroy_locked")
int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
{
struct cgroup *acquired;
/* Can't acquire an untrusted struct cgroup * pointer. */
acquired = bpf_cgroup_acquire(cgrp);
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
/* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
/* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
acquired = bpf_cgroup_acquire(NULL);
if (!acquired)
return 0;
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
acquired = bpf_cgroup_acquire(cgrp);
/* Acquired cgroup is never released. */
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
/* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */
kptr = bpf_cgroup_kptr_get(&cgrp);
if (!kptr)
return 0;
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr, *acquired;
acquired = bpf_cgroup_acquire(cgrp);
/* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */
kptr = bpf_cgroup_kptr_get(&acquired);
bpf_cgroup_release(acquired);
if (!kptr)
return 0;
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
/* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. */
kptr = bpf_cgroup_kptr_get(NULL);
if (!kptr)
return 0;
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
kptr = bpf_kptr_xchg(&v->cgrp, NULL);
if (!kptr)
return 0;
/* Kptr retrieved from map is never released. */
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
kptr = bpf_cgroup_kptr_get(&v->cgrp);
if (!kptr)
return 0;
/* Kptr acquired above is never released. */
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
/* Can't invoke bpf_cgroup_release() on an untrusted pointer. */
bpf_cgroup_release(v->cgrp);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired = (struct cgroup *)&path;
/* Cannot release random frame pointer. */
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value local, *v;
long status;
struct cgroup *acquired, *old;
s32 id;
status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
if (status)
return 0;
local.cgrp = NULL;
status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
if (status)
return status;
v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
if (!v)
return -ENOENT;
acquired = bpf_cgroup_acquire(cgrp);
old = bpf_kptr_xchg(&v->cgrp, acquired);
/* old cannot be passed to bpf_cgroup_release() without a NULL check. */
bpf_cgroup_release(old);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
{
/* Cannot release trusted cgroup pointer which was not acquired. */
bpf_cgroup_release(cgrp);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "cgrp_kfunc_common.h"
char _license[] SEC("license") = "GPL";
int err, pid, invocations;
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(cgroup_mkdir,
* TP_PROTO(struct cgroup *cgrp, const char *path),
* TP_ARGS(cgrp, path)
*/
static bool is_test_kfunc_task(void)
{
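/* Only count invocations that were triggered by the test process. */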
int cur_pid = bpf_get_current_pid_tgid() >> 32;
bool same = pid == cur_pid;
if (same)
__sync_fetch_and_add(&invocations, 1);
return same;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
if (!is_test_kfunc_task())
return 0;
acquired = bpf_cgroup_acquire(cgrp);
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
{
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status)
err = 1;
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status) {
err = 1;
return 0;
}
v = cgrps_kfunc_map_value_lookup(cgrp);
if (!v) {
err = 2;
return 0;
}
kptr = bpf_kptr_xchg(&v->cgrp, NULL);
if (!kptr) {
err = 3;
return 0;
}
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status) {
err = 1;
return 0;
}
v = cgrps_kfunc_map_value_lookup(cgrp);
if (!v) {
err = 2;
return 0;
}
kptr = bpf_cgroup_kptr_get(&v->cgrp);
if (!kptr) {
err = 3;
return 0;
}
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
{
struct cgroup *self, *ancestor1, *invalid;
if (!is_test_kfunc_task())
return 0;
self = bpf_cgroup_ancestor(cgrp, cgrp->level);
if (!self) {
err = 1;
return 0;
}
if (self->self.id != cgrp->self.id) {
bpf_cgroup_release(self);
err = 2;
return 0;
}
bpf_cgroup_release(self);
ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
if (!ancestor1) {
err = 3;
return 0;
}
bpf_cgroup_release(ancestor1);
invalid = bpf_cgroup_ancestor(cgrp, 10000);
if (invalid) {
bpf_cgroup_release(invalid);
err = 4;
return 0;
}
invalid = bpf_cgroup_ancestor(cgrp, -1);
if (invalid) {
bpf_cgroup_release(invalid);
err = 5;
return 0;
}
return 0;
}