Commit d6f39601 authored by Alexei Starovoitov, committed by Daniel Borkmann

selftests/bpf: Add a test for attaching BPF prog to another BPF prog and subprog

Add a test that attaches one FEXIT program to the main sched_cls networking program
and two other FEXIT programs to its subprograms. All three tracing programs
access the return values and skb->len of the networking program and subprograms.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20191114185720.1641606-21-ast@kernel.org
parent 4c096324
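The commit adds the two pieces shown below: the user-space test (under prog_tests/) and the BPF-side fexit programs (under progs/, built as fexit_bpf2bpf.o). For orientation, the attach flow the test exercises boils down to the following libbpf calls. This is a minimal sketch, not part of the commit; it uses the same-era libbpf API as the test, omits the CHECK-based error reporting, and skips object cleanup on the error paths.

#include <bpf/libbpf.h>

/* Minimal sketch (not part of the commit): open fexit_bpf2bpf.o against an
 * already-loaded target program and attach one of its fexit programs.
 */
static struct bpf_link *attach_fexit(int target_prog_fd, const char *sec)
{
	/* attach_prog_fd tells libbpf which BPF program's BTF to resolve
	 * the "fexit/<func>" section names against.
	 */
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.attach_prog_fd = target_prog_fd,
	);
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
	if (libbpf_get_error(obj))
		return NULL;
	if (bpf_object__load(obj))
		return NULL;
	prog = bpf_object__find_program_by_title(obj, sec);
	if (!prog)
		return NULL;
	/* sets up the BPF trampoline link on the target function's exit */
	return bpf_program__attach_trace(prog);
}

For example, attach_fexit(pkt_fd, "fexit/test_pkt_access") mirrors what test_fexit_bpf2bpf() below does in a loop for all three programs.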
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>

#define PROG_CNT 3

void test_fexit_bpf2bpf(void)
{
	const char *prog_name[PROG_CNT] = {
		"fexit/test_pkt_access",
		"fexit/test_pkt_access_subprog1",
		"fexit/test_pkt_access_subprog2",
	};
	struct bpf_object *obj = NULL, *pkt_obj;
	int err, pkt_fd, i;
	struct bpf_link *link[PROG_CNT] = {};
	struct bpf_program *prog[PROG_CNT];
	__u32 duration, retval;
	struct bpf_map *data_map;
	const int zero = 0;
	u64 result[PROG_CNT];

	err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC,
			    &pkt_obj, &pkt_fd);
	if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
		return;
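	/* Open the fexit object with attach_prog_fd pointing at the
	 * just-loaded sched_cls program, so libbpf resolves each
	 * "fexit/<func>" section name against that program's BTF.
	 */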
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .attach_prog_fd = pkt_fd,
			   );
	obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
	if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
		  "failed to open fexit_bpf2bpf: %ld\n",
		  PTR_ERR(obj)))
		goto close_prog;
	err = bpf_object__load(obj);
	if (CHECK(err, "obj_load", "err %d\n", err))
		goto close_prog;

	for (i = 0; i < PROG_CNT; i++) {
		prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
		if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i]))
			goto close_prog;
		link[i] = bpf_program__attach_trace(prog[i]);
		if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
			goto close_prog;
	}
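	/* The fexit programs publish their results in the .bss of
	 * fexit_bpf2bpf.o; libbpf derives internal map names from a
	 * truncated object name, hence "fexit_bp.bss".
	 */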
	data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss");
	if (CHECK(!data_map, "find_data_map", "data map not found\n"))
		goto close_prog;
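	/* Run the target sched_cls program once on an IPv6 test packet;
	 * each attached fexit program fires when its traced function
	 * returns and records whether it saw the expected values.
	 */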
	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
	if (CHECK(err, "get_result",
		  "failed to get output data: %d\n", err))
		goto close_prog;

	for (i = 0; i < PROG_CNT; i++)
		if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
			  result[i]))
			goto close_prog;

close_prog:
	for (i = 0; i < PROG_CNT; i++)
		if (!IS_ERR_OR_NULL(link[i]))
			bpf_link__destroy(link[i]);
	if (!IS_ERR_OR_NULL(obj))
		bpf_object__close(obj);
	bpf_object__close(pkt_obj);
}
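
/* What follows is the BPF side of the test, built as fexit_bpf2bpf.o.
 * Each fexit program's context is laid out as the traced function's
 * arguments followed by its return value. The ks32/ku64 types are
 * fixed-size integer typedefs (presumably provided by the bpf_helpers.h
 * used by these selftests at the time).
 */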
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct sk_buff {
	unsigned int len;
};

struct args {
	struct sk_buff *skb;
	ks32 ret;
};
static volatile __u64 test_result;
SEC("fexit/test_pkt_access")
int test_main(struct args *ctx)
{
	struct sk_buff *skb = ctx->skb;
	int len;

	__builtin_preserve_access_index(({
		len = skb->len;
	}));
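	/* 74 == sizeof(pkt_v6) (Ethernet + IPv6 + TCP headers); the main
	 * sched_cls program is expected to return 0 for this packet.
	 */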
	if (len != 74 || ctx->ret != 0)
		return 0;
	test_result = 1;
	return 0;
}

struct args_subprog1 {
	struct sk_buff *skb;
	ks32 ret;
};
static volatile __u64 test_result_subprog1;
SEC("fexit/test_pkt_access_subprog1")
int test_subprog1(struct args_subprog1 *ctx)
{
	struct sk_buff *skb = ctx->skb;
	int len;

	__builtin_preserve_access_index(({
		len = skb->len;
	}));
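	/* test_pkt_access_subprog1() returns twice the packet length,
	 * i.e. 2 * 74 == 148.
	 */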
	if (len != 74 || ctx->ret != 148)
		return 0;
	test_result_subprog1 = 1;
	return 0;
}

/* Though test_pkt_access_subprog2() is defined in C as:
 *   static __attribute__ ((noinline))
 *   int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
 *   {
 *           return skb->len * val;
 *   }
 * LLVM optimizations remove the 'int val' argument and generate BPF assembly:
 *   r0 = *(u32 *)(r1 + 0)
 *   w0 <<= 1
 *   exit
 * In such a case the verifier falls back to a conservative approach, and the
 * tracing program can only access the arguments and return value as u64
 * instead of their accurate types.
 */
struct args_subprog2 {
	ku64 args[5];
	ku64 ret;
};
static volatile __u64 test_result_subprog2;
SEC("fexit/test_pkt_access_subprog2")
int test_subprog2(struct args_subprog2 *ctx)
{
	struct sk_buff *skb = (void *)ctx->args[0];
	__u64 ret;
	int len;

	bpf_probe_read_kernel(&len, sizeof(len),
			      __builtin_preserve_access_index(&skb->len));

	ret = ctx->ret;
	/* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32,
	 * which randomizes the upper 32 bits after BPF_ALU32 insns.
	 * Hence after 'w0 <<= 1' the upper bits of $rax are random.
	 * That is expected and correct. Trim them.
	 */
	ret = (__u32) ret;
	if (len != 74 || ret != 148)
		return 0;
	test_result_subprog2 = 1;
	return 0;
}

char _license[] SEC("license") = "GPL";