Commit a4ae5c31 authored by Alexis Lothoré (eBPF Foundation)'s avatar Alexis Lothoré (eBPF Foundation) Committed by Martin KaFai Lau

selftests/bpf: convert get_current_cgroup_id_user to test_progs

get_current_cgroup_id_user allows testing for bpf_get_current_cgroup_id()
bpf API but is not integrated into test_progs, and so is not tested
automatically in CI.

Convert it to the test_progs framework to allow running it automatically.
The most notable differences with the old test are the following:
- the new test relies on autoattach instead of manually hooking/enabling
  the targeted tracepoint through perf_event, which reduces quite a lot the
  test code size
- it also accesses bpf prog data through global variables instead of maps
- sleep duration passed to nanosleep syscall has been reduced to its
  minimum to not impact overall CI duration (we only care about the syscall
  being properly triggered, not about the passed duration)
Signed-off-by: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
Link: https://lore.kernel.org/r/20240813-convert_cgroup_tests-v4-1-a33c03458cf6@bootlin.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parent 4a4c013d
...@@ -19,7 +19,6 @@ test_sock ...@@ -19,7 +19,6 @@ test_sock
urandom_read urandom_read
test_sockmap test_sockmap
test_lirc_mode2_user test_lirc_mode2_user
get_cgroup_id_user
test_skb_cgroup_id_user test_skb_cgroup_id_user
test_cgroup_storage test_cgroup_storage
test_flow_dissector test_flow_dissector
......
...@@ -67,7 +67,7 @@ endif ...@@ -67,7 +67,7 @@ endif
# Order correspond to 'make run_tests' order # Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_sock test_sockmap get_cgroup_id_user \ test_sock test_sockmap \
test_cgroup_storage \ test_cgroup_storage \
test_tcpnotify_user test_sysctl \ test_tcpnotify_user test_sysctl \
test_progs-no_alu32 test_progs-no_alu32
...@@ -295,7 +295,6 @@ $(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) ...@@ -295,7 +295,6 @@ $(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
$(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS)
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
printf("%s:FAIL:%s ", __func__, tag); \
printf(format); \
} else { \
printf("%s:PASS:%s\n", __func__, tag); \
} \
__ret; \
})
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
struct bpf_map *map;
map = bpf_object__find_map_by_name(obj, name);
if (!map)
return -1;
return bpf_map__fd(map);
}
#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
int main(int argc, char **argv)
{
const char *probe_name = "syscalls/sys_enter_nanosleep";
const char *file = "get_cgroup_id_kern.bpf.o";
int err, bytes, efd, prog_fd, pmu_fd;
int cgroup_fd, cgidmap_fd, pidmap_fd;
struct perf_event_attr attr = {};
struct bpf_object *obj;
__u64 kcgid = 0, ucgid;
__u32 key = 0, pid;
int exit_code = 1;
char buf[256];
const struct timespec req = {
.tv_sec = 1,
.tv_nsec = 0,
};
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
cgidmap_fd, errno))
goto close_prog;
pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
pidmap_fd, errno))
goto close_prog;
pid = getpid();
bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
snprintf(buf, sizeof(buf),
"/sys/kernel/tracing/events/%s/id", probe_name);
} else {
snprintf(buf, sizeof(buf),
"/sys/kernel/debug/tracing/events/%s/id", probe_name);
}
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
goto close_prog;
bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
"bytes %d errno %d\n", bytes, errno))
goto close_prog;
attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW;
attr.sample_period = 1;
attr.wakeup_events = 1;
/* attach to this pid so the all bpf invocations will be in the
* cgroup associated with this pid.
*/
pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
errno))
goto close_prog;
err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
errno))
goto close_pmu;
err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
errno))
goto close_pmu;
/* trigger some syscalls */
syscall(__NR_nanosleep, &req, NULL);
err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
goto close_pmu;
ucgid = get_cgroup_id(TEST_CGROUP);
if (CHECK(kcgid != ucgid, "compare_cgroup_id",
"kern cgid %llx user cgid %llx", kcgid, ucgid))
goto close_pmu;
exit_code = 0;
printf("%s:PASS\n", argv[0]);
close_pmu:
close(pmu_fd);
close_prog:
bpf_object__close(obj);
cleanup_cgroup_env:
cleanup_cgroup_environment();
return exit_code;
}
// SPDX-License-Identifier: GPL-2.0
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "get_cgroup_id_kern.skel.h"
#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
void test_cgroup_get_current_cgroup_id(void)
{
struct get_cgroup_id_kern *skel;
const struct timespec req = {
.tv_sec = 0,
.tv_nsec = 1,
};
int cgroup_fd;
__u64 ucgid;
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch"))
return;
skel = get_cgroup_id_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "load program"))
goto cleanup_cgroup;
if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach bpf program"))
goto cleanup_progs;
skel->bss->expected_pid = getpid();
/* trigger the syscall on which is attached the tested prog */
if (!ASSERT_OK(syscall(__NR_nanosleep, &req, NULL), "nanosleep"))
goto cleanup_progs;
ucgid = get_cgroup_id(TEST_CGROUP);
ASSERT_EQ(skel->bss->cg_id, ucgid, "compare cgroup ids");
cleanup_progs:
get_cgroup_id_kern__destroy(skel);
cleanup_cgroup:
close(cgroup_fd);
cleanup_cgroup_environment();
}
...@@ -4,34 +4,16 @@ ...@@ -4,34 +4,16 @@
#include <linux/bpf.h> #include <linux/bpf.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
struct { __u64 cg_id;
__uint(type, BPF_MAP_TYPE_ARRAY); __u64 expected_pid;
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} cg_ids SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} pidmap SEC(".maps");
SEC("tracepoint/syscalls/sys_enter_nanosleep") SEC("tracepoint/syscalls/sys_enter_nanosleep")
int trace(void *ctx) int trace(void *ctx)
{ {
__u32 pid = bpf_get_current_pid_tgid(); __u32 pid = bpf_get_current_pid_tgid();
__u32 key = 0, *expected_pid;
__u64 *val;
expected_pid = bpf_map_lookup_elem(&pidmap, &key);
if (!expected_pid || *expected_pid != pid)
return 0;
val = bpf_map_lookup_elem(&cg_ids, &key); if (expected_pid == pid)
if (val) cg_id = bpf_get_current_cgroup_id();
*val = bpf_get_current_cgroup_id();
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment