Commit 50c6b8a9 authored by Hao Luo, committed by Alexei Starovoitov

selftests/bpf: Add a test for btf_type_tag "percpu"

Add a test for the "percpu" btf_type_tag. Similar to the "user" tag, we
test the following cases (a condensed sketch follows the list):

 1. __percpu struct field.
 2. __percpu as function parameter.
 3. per_cpu_ptr() accepts dynamically allocated __percpu memory.
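
A condensed sketch of the BPF-side pattern these cases exercise (the full
programs are in the new btf_type_tag_percpu.c below; the program name
percpu_sketch and the standalone layout are illustrative only): a direct
load through a __percpu-tagged pointer such as cgrp->rstat_cpu is rejected
by the verifier, while bpf_per_cpu_ptr() converts it into a regular pointer
that can be dereferenced.

 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>

 SEC("tp_btf/cgroup_mkdir")
 int BPF_PROG(percpu_sketch, struct cgroup *cgrp, const char *path)
 {
 	struct cgroup_rstat_cpu *rstat;

 	/* A direct "cgrp->rstat_cpu->updated_children" load would be rejected
 	 * with "access percpu memory"; convert the __percpu pointer to a
 	 * plain pointer for the current CPU first.
 	 */
 	rstat = bpf_per_cpu_ptr(cgrp->rstat_cpu, bpf_get_smp_processor_id());
 	if (rstat)
 		*(volatile int *)rstat; /* READ_ONCE-style touch */

 	return 0;
 }

 char _license[] SEC("license") = "GPL";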

Because the "user" and "percpu" tests are very similar, btf_tag.c has been
refactored slightly: both tests now share the same helper for loading the
vmlinux and module BTF.
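
The tag check performed by this shared helper can also be reproduced as a
standalone libbpf program. A minimal sketch, assuming the libbpf headers and
library are installed and the kernel exposes BTF at /sys/kernel/btf/vmlinux
(this sketch looks for the "percpu" tag rather than the "user" tag checked
by load_btfs(); build with -lbpf):

 #include <stdio.h>
 #include <bpf/btf.h>

 int main(void)
 {
 	struct btf *vmlinux_btf;
 	__s32 type_id;

 	/* Same entry point the selftest helper uses to read vmlinux BTF. */
 	vmlinux_btf = btf__load_vmlinux_btf();
 	if (!vmlinux_btf) {
 		fprintf(stderr, "could not load vmlinux BTF\n");
 		return 1;
 	}

 	/* BTF_KIND_TYPE_TAG entries named "percpu" exist only if the kernel
 	 * was built with a compiler that emits btf_type_tag attributes.
 	 */
 	type_id = btf__find_by_name_kind(vmlinux_btf, "percpu", BTF_KIND_TYPE_TAG);
 	printf("percpu type tag %s in vmlinux BTF\n",
 	       type_id > 0 ? "present" : "absent");

 	btf__free(vmlinux_btf);
 	return 0;
 }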

Example output from log:

 > ./test_progs -v -t btf_tag

 libbpf: prog 'test_percpu1': BPF program load failed: Permission denied
 libbpf: prog 'test_percpu1': -- BEGIN PROG LOAD LOG --
 ...
 ; g = arg->a;
 1: (61) r1 = *(u32 *)(r1 +0)
 R1 is ptr_bpf_testmod_btf_type_tag_1 access percpu memory: off=0
 ...
 test_btf_type_tag_mod_percpu:PASS:btf_type_tag_percpu 0 nsec
 #26/6 btf_tag/btf_type_tag_percpu_mod1:OK

 libbpf: prog 'test_percpu2': BPF program load failed: Permission denied
 libbpf: prog 'test_percpu2': -- BEGIN PROG LOAD LOG --
 ...
 ; g = arg->p->a;
 2: (61) r1 = *(u32 *)(r1 +0)
 R1 is ptr_bpf_testmod_btf_type_tag_1 access percpu memory: off=0
 ...
 test_btf_type_tag_mod_percpu:PASS:btf_type_tag_percpu 0 nsec
 #26/7 btf_tag/btf_type_tag_percpu_mod2:OK

 libbpf: prog 'test_percpu_load': BPF program load failed: Permission denied
 libbpf: prog 'test_percpu_load': -- BEGIN PROG LOAD LOG --
 ...
 ; g = (__u64)cgrp->rstat_cpu->updated_children;
 2: (79) r1 = *(u64 *)(r1 +48)
 R1 is ptr_cgroup_rstat_cpu access percpu memory: off=48
 ...
 test_btf_type_tag_vmlinux_percpu:PASS:btf_type_tag_percpu_load 0 nsec
 #26/8 btf_tag/btf_type_tag_percpu_vmlinux_load:OK

 load_btfs:PASS:could not load vmlinux BTF 0 nsec
 test_btf_type_tag_vmlinux_percpu:PASS:btf_type_tag_percpu 0 nsec
 test_btf_type_tag_vmlinux_percpu:PASS:btf_type_tag_percpu_helper 0 nsec
 #26/9 btf_tag/btf_type_tag_percpu_vmlinux_helper:OK
Signed-off-by: Hao Luo <haoluo@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20220304191657.981240-5-haoluo@google.com
parent 5844101a
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c

@@ -33,6 +33,10 @@ struct bpf_testmod_btf_type_tag_2 {
 	struct bpf_testmod_btf_type_tag_1 __user *p;
 };
 
+struct bpf_testmod_btf_type_tag_3 {
+	struct bpf_testmod_btf_type_tag_1 __percpu *p;
+};
+
 noinline int
 bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
 	BTF_TYPE_EMIT(func_proto_typedef);
@@ -46,6 +50,16 @@ bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
 	return arg->p->a;
 }
 
+noinline int
+bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
+	return arg->a;
+}
+
+noinline int
+bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
+	return arg->p->a;
+}
+
 noinline int bpf_testmod_loop_test(int n)
 {
 	int i, sum = 0;
tools/testing/selftests/bpf/prog_tests/btf_tag.c

@@ -10,6 +10,7 @@ struct btf_type_tag_test {
 };
 #include "btf_type_tag.skel.h"
 #include "btf_type_tag_user.skel.h"
+#include "btf_type_tag_percpu.skel.h"
 
 static void test_btf_decl_tag(void)
 {
@@ -43,38 +44,81 @@ static void test_btf_type_tag(void)
 	btf_type_tag__destroy(skel);
 }
 
-static void test_btf_type_tag_mod_user(bool load_test_user1)
+/* loads vmlinux_btf as well as module_btf. If the caller passes NULL as
+ * module_btf, it will not load module btf.
+ *
+ * Returns 0 on success.
+ * Return -1 On error. In case of error, the loaded btf will be freed and the
+ * input parameters will be set to pointing to NULL.
+ */
+static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf,
+		     bool needs_vmlinux_tag)
 {
 	const char *module_name = "bpf_testmod";
-	struct btf *vmlinux_btf, *module_btf;
-	struct btf_type_tag_user *skel;
 	__s32 type_id;
-	int err;
 
 	if (!env.has_testmod) {
 		test__skip();
-		return;
+		return -1;
 	}
 
-	/* skip the test if the module does not have __user tags */
-	vmlinux_btf = btf__load_vmlinux_btf();
-	if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
-		return;
-
-	module_btf = btf__load_module_btf(module_name, vmlinux_btf);
-	if (!ASSERT_OK_PTR(module_btf, "could not load module BTF"))
+	*vmlinux_btf = btf__load_vmlinux_btf();
+	if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF"))
+		return -1;
+
+	if (!needs_vmlinux_tag)
+		goto load_module_btf;
+
+	/* skip the test if the vmlinux does not have __user tags */
+	type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
+	if (type_id <= 0) {
+		printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
+		test__skip();
 		goto free_vmlinux_btf;
+	}
 
-	type_id = btf__find_by_name_kind(module_btf, "user", BTF_KIND_TYPE_TAG);
+load_module_btf:
+	/* skip loading module_btf, if not requested by caller */
+	if (!module_btf)
+		return 0;
+
+	*module_btf = btf__load_module_btf(module_name, *vmlinux_btf);
+	if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF"))
+		goto free_vmlinux_btf;
+
+	/* skip the test if the module does not have __user tags */
+	type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG);
 	if (type_id <= 0) {
 		printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name);
 		test__skip();
 		goto free_module_btf;
 	}
 
+	return 0;
+
+free_module_btf:
+	btf__free(*module_btf);
+free_vmlinux_btf:
+	btf__free(*vmlinux_btf);
+
+	*vmlinux_btf = NULL;
+	if (module_btf)
+		*module_btf = NULL;
+	return -1;
+}
+
+static void test_btf_type_tag_mod_user(bool load_test_user1)
+{
+	struct btf *vmlinux_btf = NULL, *module_btf = NULL;
+	struct btf_type_tag_user *skel;
+	int err;
+
+	if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
+		return;
+
 	skel = btf_type_tag_user__open();
 	if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
-		goto free_module_btf;
+		goto cleanup;
 
 	bpf_program__set_autoload(skel->progs.test_sys_getsockname, false);
 	if (load_test_user1)
@@ -87,34 +131,23 @@ static void test_btf_type_tag_mod_user(bool load_test_user1)
 
 	btf_type_tag_user__destroy(skel);
 
-free_module_btf:
+cleanup:
 	btf__free(module_btf);
-free_vmlinux_btf:
 	btf__free(vmlinux_btf);
 }
 
 static void test_btf_type_tag_vmlinux_user(void)
 {
 	struct btf_type_tag_user *skel;
-	struct btf *vmlinux_btf;
-	__s32 type_id;
+	struct btf *vmlinux_btf = NULL;
 	int err;
 
-	/* skip the test if the vmlinux does not have __user tags */
-	vmlinux_btf = btf__load_vmlinux_btf();
-	if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+	if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
 		return;
 
-	type_id = btf__find_by_name_kind(vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
-	if (type_id <= 0) {
-		printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
-		test__skip();
-		goto free_vmlinux_btf;
-	}
-
 	skel = btf_type_tag_user__open();
 	if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
-		goto free_vmlinux_btf;
+		goto cleanup;
 
 	bpf_program__set_autoload(skel->progs.test_user2, false);
 	bpf_program__set_autoload(skel->progs.test_user1, false);
@@ -124,7 +157,70 @@ static void test_btf_type_tag_vmlinux_user(void)
 
 	btf_type_tag_user__destroy(skel);
 
-free_vmlinux_btf:
+cleanup:
 	btf__free(vmlinux_btf);
 }
 
+static void test_btf_type_tag_mod_percpu(bool load_test_percpu1)
+{
+	struct btf *vmlinux_btf, *module_btf;
+	struct btf_type_tag_percpu *skel;
+	int err;
+
+	if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
+		return;
+
+	skel = btf_type_tag_percpu__open();
+	if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
+		goto cleanup;
+
+	bpf_program__set_autoload(skel->progs.test_percpu_load, false);
+	bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
+	if (load_test_percpu1)
+		bpf_program__set_autoload(skel->progs.test_percpu2, false);
+	else
+		bpf_program__set_autoload(skel->progs.test_percpu1, false);
+
+	err = btf_type_tag_percpu__load(skel);
+	ASSERT_ERR(err, "btf_type_tag_percpu");
+
+	btf_type_tag_percpu__destroy(skel);
+
+cleanup:
+	btf__free(module_btf);
+	btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_vmlinux_percpu(bool load_test)
+{
+	struct btf_type_tag_percpu *skel;
+	struct btf *vmlinux_btf = NULL;
+	int err;
+
+	if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
+		return;
+
+	skel = btf_type_tag_percpu__open();
+	if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
+		goto cleanup;
+
+	bpf_program__set_autoload(skel->progs.test_percpu2, false);
+	bpf_program__set_autoload(skel->progs.test_percpu1, false);
+	if (load_test) {
+		bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
+
+		err = btf_type_tag_percpu__load(skel);
+		ASSERT_ERR(err, "btf_type_tag_percpu_load");
+	} else {
+		bpf_program__set_autoload(skel->progs.test_percpu_load, false);
+
+		err = btf_type_tag_percpu__load(skel);
+		ASSERT_OK(err, "btf_type_tag_percpu_helper");
+	}
+
+	btf_type_tag_percpu__destroy(skel);
+
+cleanup:
+	btf__free(vmlinux_btf);
+}
+
@@ -134,10 +230,20 @@ void test_btf_tag(void)
 		test_btf_decl_tag();
 	if (test__start_subtest("btf_type_tag"))
 		test_btf_type_tag();
 	if (test__start_subtest("btf_type_tag_user_mod1"))
 		test_btf_type_tag_mod_user(true);
 	if (test__start_subtest("btf_type_tag_user_mod2"))
 		test_btf_type_tag_mod_user(false);
 	if (test__start_subtest("btf_type_tag_sys_user_vmlinux"))
 		test_btf_type_tag_vmlinux_user();
+
+	if (test__start_subtest("btf_type_tag_percpu_mod1"))
+		test_btf_type_tag_mod_percpu(true);
+	if (test__start_subtest("btf_type_tag_percpu_mod2"))
+		test_btf_type_tag_mod_percpu(false);
+
+	if (test__start_subtest("btf_type_tag_percpu_vmlinux_load"))
+		test_btf_type_tag_vmlinux_percpu(true);
+	if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper"))
+		test_btf_type_tag_vmlinux_percpu(false);
 }
new file: tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 *p;
};

__u64 g;

SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg)
{
	g = arg->a;
	return 0;
}

SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2")
int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg)
{
	g = arg->p->a;
	return 0;
}

/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path)
 *
 * struct cgroup_rstat_cpu {
 *   ...
 *   struct cgroup *updated_children;
 *   ...
 * };
 *
 * struct cgroup {
 *   ...
 *   struct cgroup_rstat_cpu __percpu *rstat_cpu;
 *   ...
 * };
 */
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
{
	g = (__u64)cgrp->rstat_cpu->updated_children;
	return 0;
}

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
{
	struct cgroup_rstat_cpu *rstat;
	__u32 cpu;

	cpu = bpf_get_smp_processor_id();
	rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(cgrp->rstat_cpu, cpu);
	if (rstat) {
		/* READ_ONCE */
		*(volatile int *)rstat;
	}

	return 0;
}