Commit b97ce547 authored by Martin KaFai Lau

Merge branch 'selftests/bpf: convert three other cgroup tests to test_progs'

Alexis Lothoré (eBPF Foundation) says:

====================
Hello,
this series brings a new set of tests converted to the test_progs framework.
Since the tests are quite small, I chose to group the three test conversions
in the same series, but feel free to let me know if I should keep one series
per test. The series focuses on cgroup testing and converts the following
tests:
- get_cgroup_id_user
- cgroup_storage
- test_skb_cgroup_id_user

Changes in v4:
- Fix test after netns addition by making sure the loopback interface is up
- Link to v3: https://lore.kernel.org/r/20240812-convert_cgroup_tests-v3-0-47ac6ce4e88b@bootlin.com

Changes in v3:
- Fixed multiple leaks on cgroup file descriptors and sockets
- Used dedicated network namespaces for tests involving network
- Link to v2: https://lore.kernel.org/r/20240806-convert_cgroup_tests-v2-0-180c57e5b710@bootlin.com

Changes in v2:
- Use global variables instead of maps when possible
- Collect review tags from Alan
- Link to v1: https://lore.kernel.org/r/20240731-convert_cgroup_tests-v1-0-14cbc51b6947@bootlin.com
====================
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parents 4a4c013d f957c230
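As a rough illustration of the conversion pattern used throughout the series (an editorial sketch, not part of the patches themselves): in test_progs, each test drives its BPF object through the bpftool-generated skeleton, and values that the old standalone tests exchanged through single-entry maps become BPF global variables accessed via skel->bss. A minimal sketch based on the converted get_cgroup_id test, with error paths trimmed and the function name purely illustrative:

#include "test_progs.h"
#include "get_cgroup_id_kern.skel.h"	/* generated skeleton */

static void skeleton_pattern_sketch(void)
{
	struct get_cgroup_id_kern *skel;

	/* open/load/attach replace bpf_prog_test_load() plus the manual
	 * perf_event_open()/ioctl() tracepoint plumbing of the old test.
	 */
	skel = get_cgroup_id_kern__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open and load"))
		return;
	if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach"))
		goto cleanup;

	/* Globals in the BPF program replace the old pidmap/cg_ids maps:
	 * they are written and read through skel->bss instead of
	 * bpf_map_update_elem()/bpf_map_lookup_elem().
	 */
	skel->bss->expected_pid = getpid();
	/* ... trigger the traced syscall, then check skel->bss->cg_id ... */

cleanup:
	get_cgroup_id_kern__destroy(skel);
}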
@@ -19,9 +19,6 @@ test_sock
 urandom_read
 test_sockmap
 test_lirc_mode2_user
-get_cgroup_id_user
-test_skb_cgroup_id_user
-test_cgroup_storage
 test_flow_dissector
 flow_dissector_load
 test_tcpnotify_user
...
@@ -67,8 +67,7 @@ endif
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
-	test_sock test_sockmap get_cgroup_id_user \
-	test_cgroup_storage \
+	test_sock test_sockmap \
 	test_tcpnotify_user test_sysctl \
 	test_progs-no_alu32
 TEST_INST_SUBDIRS := no_alu32
@@ -138,7 +137,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
 	test_xdp_vlan.sh test_bpftool.py
 # Compile but not part of 'make run_tests'
-TEST_GEN_PROGS_EXTENDED = test_skb_cgroup_id_user \
+TEST_GEN_PROGS_EXTENDED = \
 	flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
 	test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
 	xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
@@ -291,12 +290,9 @@ JSON_WRITER := $(OUTPUT)/json_writer.o
 CAP_HELPERS := $(OUTPUT)/cap_helpers.o
 NETWORK_HELPERS := $(OUTPUT)/network_helpers.o
-$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
-$(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
-$(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_tag: $(TESTING_HELPERS)
...
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
printf("%s:FAIL:%s ", __func__, tag); \
printf(format); \
} else { \
printf("%s:PASS:%s\n", __func__, tag); \
} \
__ret; \
})
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
struct bpf_map *map;
map = bpf_object__find_map_by_name(obj, name);
if (!map)
return -1;
return bpf_map__fd(map);
}
#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
int main(int argc, char **argv)
{
const char *probe_name = "syscalls/sys_enter_nanosleep";
const char *file = "get_cgroup_id_kern.bpf.o";
int err, bytes, efd, prog_fd, pmu_fd;
int cgroup_fd, cgidmap_fd, pidmap_fd;
struct perf_event_attr attr = {};
struct bpf_object *obj;
__u64 kcgid = 0, ucgid;
__u32 key = 0, pid;
int exit_code = 1;
char buf[256];
const struct timespec req = {
.tv_sec = 1,
.tv_nsec = 0,
};
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
cgidmap_fd, errno))
goto close_prog;
pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
pidmap_fd, errno))
goto close_prog;
pid = getpid();
bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
snprintf(buf, sizeof(buf),
"/sys/kernel/tracing/events/%s/id", probe_name);
} else {
snprintf(buf, sizeof(buf),
"/sys/kernel/debug/tracing/events/%s/id", probe_name);
}
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
goto close_prog;
bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
"bytes %d errno %d\n", bytes, errno))
goto close_prog;
attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW;
attr.sample_period = 1;
attr.wakeup_events = 1;
/* attach to this pid so the all bpf invocations will be in the
* cgroup associated with this pid.
*/
pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
errno))
goto close_prog;
err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
errno))
goto close_pmu;
err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
errno))
goto close_pmu;
/* trigger some syscalls */
syscall(__NR_nanosleep, &req, NULL);
err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
goto close_pmu;
ucgid = get_cgroup_id(TEST_CGROUP);
if (CHECK(kcgid != ucgid, "compare_cgroup_id",
"kern cgid %llx user cgid %llx", kcgid, ucgid))
goto close_pmu;
exit_code = 0;
printf("%s:PASS\n", argv[0]);
close_pmu:
close(pmu_fd);
close_prog:
bpf_object__close(obj);
cleanup_cgroup_env:
cleanup_cgroup_environment();
return exit_code;
}
// SPDX-License-Identifier: GPL-2.0
#include "test_progs.h"
#include "network_helpers.h"
#include "cgroup_helpers.h"
#include "cgroup_ancestor.skel.h"
#define CGROUP_PATH "/skb_cgroup_test"
#define TEST_NS "cgroup_ancestor_ns"
#define NUM_CGROUP_LEVELS 4
#define WAIT_AUTO_IP_MAX_ATTEMPT 10
#define DST_ADDR "::1"
#define DST_PORT 1234
#define MAX_ASSERT_NAME 32
struct test_data {
struct cgroup_ancestor *skel;
struct bpf_tc_hook qdisc;
struct bpf_tc_opts tc_attach;
struct nstoken *ns;
};
static int send_datagram(void)
{
unsigned char buf[] = "some random test data";
struct sockaddr_in6 addr = { .sin6_family = AF_INET6,
.sin6_port = htons(DST_PORT), };
int sock, n;
if (!ASSERT_EQ(inet_pton(AF_INET6, DST_ADDR, &addr.sin6_addr), 1,
"inet_pton"))
return -1;
sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (!ASSERT_OK_FD(sock, "create socket"))
return sock;
if (!ASSERT_OK(connect(sock, &addr, sizeof(addr)), "connect")) {
close(sock);
return -1;
}
n = sendto(sock, buf, sizeof(buf), 0, (const struct sockaddr *)&addr,
sizeof(addr));
close(sock);
return ASSERT_EQ(n, sizeof(buf), "send data") ? 0 : -1;
}
static int setup_network(struct test_data *t)
{
SYS(fail, "ip netns add %s", TEST_NS);
t->ns = open_netns(TEST_NS);
if (!ASSERT_OK_PTR(t->ns, "open netns"))
goto cleanup_ns;
SYS(close_ns, "ip link set lo up");
memset(&t->qdisc, 0, sizeof(t->qdisc));
t->qdisc.sz = sizeof(t->qdisc);
t->qdisc.attach_point = BPF_TC_EGRESS;
t->qdisc.ifindex = if_nametoindex("lo");
if (!ASSERT_NEQ(t->qdisc.ifindex, 0, "if_nametoindex"))
goto close_ns;
if (!ASSERT_OK(bpf_tc_hook_create(&t->qdisc), "qdisc add"))
goto close_ns;
memset(&t->tc_attach, 0, sizeof(t->tc_attach));
t->tc_attach.sz = sizeof(t->tc_attach);
t->tc_attach.prog_fd = bpf_program__fd(t->skel->progs.log_cgroup_id);
if (!ASSERT_OK(bpf_tc_attach(&t->qdisc, &t->tc_attach), "filter add"))
goto cleanup_qdisc;
return 0;
cleanup_qdisc:
bpf_tc_hook_destroy(&t->qdisc);
close_ns:
close_netns(t->ns);
cleanup_ns:
SYS_NOFAIL("ip netns del %s", TEST_NS);
fail:
return 1;
}
static void cleanup_network(struct test_data *t)
{
bpf_tc_detach(&t->qdisc, &t->tc_attach);
bpf_tc_hook_destroy(&t->qdisc);
close_netns(t->ns);
SYS_NOFAIL("ip netns del %s", TEST_NS);
}
static void check_ancestors_ids(struct test_data *t)
{
__u64 expected_ids[NUM_CGROUP_LEVELS];
char assert_name[MAX_ASSERT_NAME];
__u32 level;
expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
expected_ids[1] = get_cgroup_id("");
expected_ids[2] = get_cgroup_id(CGROUP_PATH);
expected_ids[3] = 0; /* non-existent cgroup */
for (level = 0; level < NUM_CGROUP_LEVELS; level++) {
snprintf(assert_name, MAX_ASSERT_NAME,
"ancestor id at level %d", level);
ASSERT_EQ(t->skel->bss->cgroup_ids[level], expected_ids[level],
assert_name);
}
}
void test_cgroup_ancestor(void)
{
struct test_data t;
int cgroup_fd;
t.skel = cgroup_ancestor__open_and_load();
if (!ASSERT_OK_PTR(t.skel, "open and load"))
return;
t.skel->bss->dport = htons(DST_PORT);
cgroup_fd = cgroup_setup_and_join(CGROUP_PATH);
if (cgroup_fd < 0)
goto cleanup_progs;
if (setup_network(&t))
goto cleanup_cgroups;
if (send_datagram())
goto cleanup_network;
check_ancestors_ids(&t);
cleanup_network:
cleanup_network(&t);
cleanup_cgroups:
close(cgroup_fd);
cleanup_cgroup_environment();
cleanup_progs:
cgroup_ancestor__destroy(t.skel);
}
// SPDX-License-Identifier: GPL-2.0
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "get_cgroup_id_kern.skel.h"
#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
void test_cgroup_get_current_cgroup_id(void)
{
struct get_cgroup_id_kern *skel;
const struct timespec req = {
.tv_sec = 0,
.tv_nsec = 1,
};
int cgroup_fd;
__u64 ucgid;
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch"))
return;
skel = get_cgroup_id_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "load program"))
goto cleanup_cgroup;
if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach bpf program"))
goto cleanup_progs;
skel->bss->expected_pid = getpid();
/* trigger the syscall to which the tested prog is attached */
if (!ASSERT_OK(syscall(__NR_nanosleep, &req, NULL), "nanosleep"))
goto cleanup_progs;
ucgid = get_cgroup_id(TEST_CGROUP);
ASSERT_EQ(skel->bss->cg_id, ucgid, "compare cgroup ids");
cleanup_progs:
get_cgroup_id_kern__destroy(skel);
cleanup_cgroup:
close(cgroup_fd);
cleanup_cgroup_environment();
}
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "cgroup_storage.skel.h"
#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
#define TEST_NS "cgroup_storage_ns"
#define PING_CMD "ping localhost -c 1 -W 1 -q"
static int setup_network(struct nstoken **token)
{
SYS(fail, "ip netns add %s", TEST_NS);
*token = open_netns(TEST_NS);
if (!ASSERT_OK_PTR(*token, "open netns"))
goto cleanup_ns;
SYS(cleanup_ns, "ip link set lo up");
return 0;
cleanup_ns:
SYS_NOFAIL("ip netns del %s", TEST_NS);
fail:
return -1;
}
static void cleanup_network(struct nstoken *ns)
{
close_netns(ns);
SYS_NOFAIL("ip netns del %s", TEST_NS);
}
void test_cgroup_storage(void)
{
struct bpf_cgroup_storage_key key;
struct cgroup_storage *skel;
struct nstoken *ns = NULL;
unsigned long long value;
int cgroup_fd;
int err;
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (!ASSERT_OK_FD(cgroup_fd, "create cgroup"))
return;
if (!ASSERT_OK(setup_network(&ns), "setup network"))
goto cleanup_cgroup;
skel = cgroup_storage__open_and_load();
if (!ASSERT_OK_PTR(skel, "load program"))
goto cleanup_network;
skel->links.bpf_prog =
bpf_program__attach_cgroup(skel->progs.bpf_prog, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.bpf_prog, "attach program"))
goto cleanup_progs;
/* Check that one out of every two packets is dropped */
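/* The attached egress program returns (*counter & 1): packets hitting an
 * odd counter value are passed and packets hitting an even value are
 * dropped, so consecutive pings should alternate between success and
 * failure.
 */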
err = SYS_NOFAIL(PING_CMD);
ASSERT_OK(err, "first ping");
err = SYS_NOFAIL(PING_CMD);
ASSERT_NEQ(err, 0, "second ping");
err = SYS_NOFAIL(PING_CMD);
ASSERT_OK(err, "third ping");
err = bpf_map__get_next_key(skel->maps.cgroup_storage, NULL, &key,
sizeof(key));
if (!ASSERT_OK(err, "get first key"))
goto cleanup_progs;
err = bpf_map__lookup_elem(skel->maps.cgroup_storage, &key, sizeof(key),
&value, sizeof(value), 0);
if (!ASSERT_OK(err, "first packet count read"))
goto cleanup_progs;
/* Add one to the packet counter, check again packet filtering */
value++;
err = bpf_map__update_elem(skel->maps.cgroup_storage, &key, sizeof(key),
&value, sizeof(value), 0);
if (!ASSERT_OK(err, "increment packet counter"))
goto cleanup_progs;
err = SYS_NOFAIL(PING_CMD);
ASSERT_OK(err, "fourth ping");
err = SYS_NOFAIL(PING_CMD);
ASSERT_NEQ(err, 0, "fifth ping");
err = SYS_NOFAIL(PING_CMD);
ASSERT_OK(err, "sixth ping");
cleanup_progs:
cgroup_storage__destroy(skel);
cleanup_network:
cleanup_network(ns);
cleanup_cgroup:
close(cgroup_fd);
cleanup_cgroup_environment();
}
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (c) 2018 Facebook
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h>
-#include <string.h>
+#include <vmlinux.h>
 #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_tracing_net.h"
 #define NUM_CGROUP_LEVELS 4
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__type(key, __u32);
-	__type(value, __u64);
-	__uint(max_entries, NUM_CGROUP_LEVELS);
-} cgroup_ids SEC(".maps");
+__u64 cgroup_ids[NUM_CGROUP_LEVELS];
+__u16 dport;
 static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
 {
-	__u64 id;
 	/* [1] &level passed to external function that may change it, it's
 	 * incompatible with loop unroll.
 	 */
-	id = bpf_skb_ancestor_cgroup_id(skb, level);
-	bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
+	cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level);
 }
-SEC("cgroup_id_logger")
+SEC("tc")
 int log_cgroup_id(struct __sk_buff *skb)
 {
-	/* Loop unroll can't be used here due to [1]. Unrolling manually.
-	 * Number of calls should be in sync with NUM_CGROUP_LEVELS.
-	 */
+	struct sock *sk = (void *)skb->sk;
+	if (!sk)
+		return TC_ACT_OK;
+	sk = bpf_core_cast(sk, struct sock);
+	if (sk->sk_protocol == IPPROTO_UDP && sk->sk_dport == dport) {
 	log_nth_level(skb, 0);
 	log_nth_level(skb, 1);
 	log_nth_level(skb, 2);
 	log_nth_level(skb, 3);
+	}
 	return TC_ACT_OK;
 }
...
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u64);
} cgroup_storage SEC(".maps");
SEC("cgroup_skb/egress")
int bpf_prog(struct __sk_buff *skb)
{
__u64 *counter;
counter = bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(counter, 1);
/* Drop one out of every two packets */
return (*counter & 1);
}
char _license[] SEC("license") = "GPL";
@@ -4,34 +4,16 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, __u64);
-} cg_ids SEC(".maps");
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, __u32);
-} pidmap SEC(".maps");
+__u64 cg_id;
+__u64 expected_pid;
 SEC("tracepoint/syscalls/sys_enter_nanosleep")
 int trace(void *ctx)
 {
 	__u32 pid = bpf_get_current_pid_tgid();
-	__u32 key = 0, *expected_pid;
-	__u64 *val;
-	expected_pid = bpf_map_lookup_elem(&pidmap, &key);
-	if (!expected_pid || *expected_pid != pid)
-		return 0;
-	val = bpf_map_lookup_elem(&cg_ids, &key);
-	if (val)
-		*val = bpf_get_current_cgroup_id();
+	if (expected_pid == pid)
+		cg_id = bpf_get_current_cgroup_id();
 	return 0;
 }
...
// SPDX-License-Identifier: GPL-2.0
#include <assert.h>
#include <bpf/bpf.h>
#include <linux/filter.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
int main(int argc, char **argv)
{
struct bpf_insn prog[] = {
BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */
BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
};
size_t insns_cnt = ARRAY_SIZE(prog);
int error = EXIT_FAILURE;
int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
struct bpf_cgroup_storage_key key;
unsigned long long value;
unsigned long long *percpu_value;
int cpu, nproc;
nproc = bpf_num_possible_cpus();
percpu_value = malloc(sizeof(*percpu_value) * nproc);
if (!percpu_value) {
printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
goto err;
}
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key),
sizeof(value), 0, NULL);
if (map_fd < 0) {
printf("Failed to create map: %s\n", strerror(errno));
goto out;
}
percpu_map_fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
sizeof(key), sizeof(value), 0, NULL);
if (percpu_map_fd < 0) {
printf("Failed to create map: %s\n", strerror(errno));
goto out;
}
prog[0].imm = percpu_map_fd;
prog[7].imm = map_fd;
prog_fd = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (prog_fd < 0) {
printf("Failed to load bpf program: %s\n", bpf_log_buf);
goto out;
}
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
/* Attach the bpf program */
if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
printf("Failed to attach bpf program\n");
goto err;
}
if (bpf_map_get_next_key(map_fd, NULL, &key)) {
printf("Failed to get the first key in cgroup storage\n");
goto err;
}
if (bpf_map_lookup_elem(map_fd, &key, &value)) {
printf("Failed to lookup cgroup storage 0\n");
goto err;
}
for (cpu = 0; cpu < nproc; cpu++)
percpu_value[cpu] = 1000;
if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) {
printf("Failed to update the data in the cgroup storage\n");
goto err;
}
/* Every second packet should be dropped */
assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
/* Check the counter in the cgroup local storage */
if (bpf_map_lookup_elem(map_fd, &key, &value)) {
printf("Failed to lookup cgroup storage\n");
goto err;
}
if (value != 3) {
printf("Unexpected data in the cgroup storage: %llu\n", value);
goto err;
}
/* Bump the counter in the cgroup local storage */
value++;
if (bpf_map_update_elem(map_fd, &key, &value, 0)) {
printf("Failed to update the data in the cgroup storage\n");
goto err;
}
/* Every second packet should be dropped */
assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
/* Check the final value of the counter in the cgroup local storage */
if (bpf_map_lookup_elem(map_fd, &key, &value)) {
printf("Failed to lookup the cgroup storage\n");
goto err;
}
if (value != 7) {
printf("Unexpected data in the cgroup storage: %llu\n", value);
goto err;
}
/* Check the final value of the counter in the percpu local storage */
for (cpu = 0; cpu < nproc; cpu++)
percpu_value[cpu] = 0;
if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) {
printf("Failed to lookup the per-cpu cgroup storage\n");
goto err;
}
value = 0;
for (cpu = 0; cpu < nproc; cpu++)
value += percpu_value[cpu];
if (value != nproc * 1000 + 6) {
printf("Unexpected data in the per-cpu cgroup storage\n");
goto err;
}
error = 0;
printf("test_cgroup_storage:PASS\n");
err:
cleanup_cgroup_environment();
free(percpu_value);
out:
return error;
}
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Facebook
set -eu
wait_for_ip()
{
local _i
echo -n "Wait for testing link-local IP to become available "
for _i in $(seq ${MAX_PING_TRIES}); do
echo -n "."
if $PING6 -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
echo " OK"
return
fi
sleep 1
done
echo 1>&2 "ERROR: Timeout waiting for test IP to become available."
exit 1
}
setup()
{
# Create testing interfaces not to interfere with current environment.
ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER}
ip link set ${TEST_IF} up
ip link set ${TEST_IF_PEER} up
wait_for_ip
tc qdisc add dev ${TEST_IF} clsact
tc filter add dev ${TEST_IF} egress bpf obj ${BPF_PROG_OBJ} \
sec ${BPF_PROG_SECTION} da
BPF_PROG_ID=$(tc filter show dev ${TEST_IF} egress | \
awk '/ id / {sub(/.* id /, "", $0); print($1)}')
}
cleanup()
{
ip link del ${TEST_IF} 2>/dev/null || :
ip link del ${TEST_IF_PEER} 2>/dev/null || :
}
main()
{
trap cleanup EXIT 2 3 6 15
setup
${PROG} ${TEST_IF} ${BPF_PROG_ID}
}
DIR=$(dirname $0)
TEST_IF="test_cgid_1"
TEST_IF_PEER="test_cgid_2"
MAX_PING_TRIES=5
BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.bpf.o"
BPF_PROG_SECTION="cgroup_id_logger"
BPF_PROG_ID=0
PROG="${DIR}/test_skb_cgroup_id_user"
type ping6 >/dev/null 2>&1 && PING6="ping6" || PING6="ping -6"
main
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#define CGROUP_PATH "/skb_cgroup_test"
#define NUM_CGROUP_LEVELS 4
/* RFC 4291, Section 2.7.1 */
#define LINKLOCAL_MULTICAST "ff02::1"
static int mk_dst_addr(const char *ip, const char *iface,
struct sockaddr_in6 *dst)
{
memset(dst, 0, sizeof(*dst));
dst->sin6_family = AF_INET6;
dst->sin6_port = htons(1025);
if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) {
log_err("Invalid IPv6: %s", ip);
return -1;
}
dst->sin6_scope_id = if_nametoindex(iface);
if (!dst->sin6_scope_id) {
log_err("Failed to get index of iface: %s", iface);
return -1;
}
return 0;
}
static int send_packet(const char *iface)
{
struct sockaddr_in6 dst;
char msg[] = "msg";
int err = 0;
int fd = -1;
if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst))
goto err;
fd = socket(AF_INET6, SOCK_DGRAM, 0);
if (fd == -1) {
log_err("Failed to create UDP socket");
goto err;
}
if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst,
sizeof(dst)) == -1) {
log_err("Failed to send datagram");
goto err;
}
goto out;
err:
err = -1;
out:
if (fd >= 0)
close(fd);
return err;
}
int get_map_fd_by_prog_id(int prog_id)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 map_ids[1];
int prog_fd = -1;
int map_fd = -1;
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd < 0) {
log_err("Failed to get fd by prog id %d", prog_id);
goto err;
}
info.nr_map_ids = 1;
info.map_ids = (__u64) (unsigned long) map_ids;
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
log_err("Failed to get info by prog fd %d", prog_fd);
goto err;
}
if (!info.nr_map_ids) {
log_err("No maps found for prog fd %d", prog_fd);
goto err;
}
map_fd = bpf_map_get_fd_by_id(map_ids[0]);
if (map_fd < 0)
log_err("Failed to get fd by map id %d", map_ids[0]);
err:
if (prog_fd >= 0)
close(prog_fd);
return map_fd;
}
int check_ancestor_cgroup_ids(int prog_id)
{
__u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS];
__u32 level;
int err = 0;
int map_fd;
expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
expected_ids[1] = get_cgroup_id("");
expected_ids[2] = get_cgroup_id(CGROUP_PATH);
expected_ids[3] = 0; /* non-existent cgroup */
map_fd = get_map_fd_by_prog_id(prog_id);
if (map_fd < 0)
goto err;
for (level = 0; level < NUM_CGROUP_LEVELS; ++level) {
if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) {
log_err("Failed to lookup key %d", level);
goto err;
}
if (actual_ids[level] != expected_ids[level]) {
log_err("%llx (actual) != %llx (expected), level: %u\n",
actual_ids[level], expected_ids[level], level);
goto err;
}
}
goto out;
err:
err = -1;
out:
if (map_fd >= 0)
close(map_fd);
return err;
}
int main(int argc, char **argv)
{
int cgfd = -1;
int err = 0;
if (argc < 3) {
fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]);
exit(EXIT_FAILURE);
}
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
cgfd = cgroup_setup_and_join(CGROUP_PATH);
if (cgfd < 0)
goto err;
if (send_packet(argv[1]))
goto err;
if (check_ancestor_cgroup_ids(atoi(argv[2])))
goto err;
goto out;
err:
err = -1;
out:
close(cgfd);
cleanup_cgroup_environment();
printf("[%s]\n", err ? "FAIL" : "PASS");
return err;
}