Commit 7aa424e0 authored by Feng Zhou, committed by Andrii Nakryiko

selftests/bpf: Fix some bugs in map_lookup_percpu_elem testcase

Address review comments from Andrii Nakryiko; details here:
https://lore.kernel.org/lkml/20220511093854.411-1-zhoufeng.zf@bytedance.com/T/

use /* */ instead of //
use libbpf_num_possible_cpus() instead of sysconf(_SC_NPROCESSORS_ONLN)
use 8 bytes for value size
fix memory leak
use ASSERT_EQ instead of ASSERT_OK
add bpf_loop to fetch values on each possible CPU

Fixes: ed7c1377 ("selftests/bpf: add test case for bpf_map_lookup_percpu_elem")
Signed-off-by: Feng Zhou <zhoufeng.zf@bytedance.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20220518025053.20492-1-zhoufeng.zf@bytedance.com
parent 834650b5
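
A note on the "use libbpf_num_possible_cpus()" and "use 8 bytes for value size" items above: per-CPU map updates from user space pass one value slot per possible CPU, and the kernel lays those slots out 8-byte aligned, so sizing the buffer with libbpf_num_possible_cpus() and using __u64 values keeps the layout unambiguous. The following is only a minimal sketch of that pattern; the helper name update_all_cpus_u64 is invented for illustration and is not part of this commit.

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Illustrative helper (not from the commit): write value i into CPU i's
 * slot of a per-CPU map. Per-CPU updates take an array with one
 * 8-byte-aligned slot per possible CPU, hence libbpf_num_possible_cpus()
 * and __u64 values.
 */
static int update_all_cpus_u64(int map_fd, __u32 key)
{
	int i, err, nr_cpus = libbpf_num_possible_cpus();
	__u64 *vals;

	if (nr_cpus < 0)
		return nr_cpus;
	vals = calloc(nr_cpus, sizeof(*vals));
	if (!vals)
		return -ENOMEM;
	for (i = 0; i < nr_cpus; i++)
		vals[i] = i;	/* CPU i's slot gets value i */
	err = bpf_map_update_elem(map_fd, &key, vals, BPF_ANY);
	free(vals);
	return err;
}
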
tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2022 Bytedance
+/* Copyright (c) 2022 Bytedance */
 #include <test_progs.h>
 #include "test_map_lookup_percpu_elem.skel.h"
 
-#define TEST_VALUE 1
-
 void test_map_lookup_percpu_elem(void)
 {
 	struct test_map_lookup_percpu_elem *skel;
-	int key = 0, ret;
-	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
-	int *buf;
+	__u64 key = 0, sum;
+	int ret, i, nr_cpus = libbpf_num_possible_cpus();
+	__u64 *buf;
 
-	buf = (int *)malloc(nr_cpus*sizeof(int));
+	buf = malloc(nr_cpus*sizeof(__u64));
 	if (!ASSERT_OK_PTR(buf, "malloc"))
 		return;
-	memset(buf, 0, nr_cpus*sizeof(int));
-	buf[0] = TEST_VALUE;
 
-	skel = test_map_lookup_percpu_elem__open_and_load();
-	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
-		return;
+	for (i = 0; i < nr_cpus; i++)
+		buf[i] = i;
+	sum = (nr_cpus - 1) * nr_cpus / 2;
+
+	skel = test_map_lookup_percpu_elem__open();
+	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
+		goto exit;
+
+	skel->rodata->my_pid = getpid();
+	skel->rodata->nr_cpus = nr_cpus;
+
+	ret = test_map_lookup_percpu_elem__load(skel);
+	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
+		goto cleanup;
+
 	ret = test_map_lookup_percpu_elem__attach(skel);
-	ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach");
+	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
+		goto cleanup;
 
 	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
 	ASSERT_OK(ret, "percpu_array_map update");
@@ -37,10 +45,14 @@ void test_map_lookup_percpu_elem(void)
 	syscall(__NR_getuid);
 
-	ret = skel->bss->percpu_array_elem_val == TEST_VALUE &&
-	      skel->bss->percpu_hash_elem_val == TEST_VALUE &&
-	      skel->bss->percpu_lru_hash_elem_val == TEST_VALUE;
-	ASSERT_OK(!ret, "bpf_map_lookup_percpu_elem success");
-
 	test_map_lookup_percpu_elem__detach(skel);
+
+	ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
+	ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
+	ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");
+
+cleanup:
 	test_map_lookup_percpu_elem__destroy(skel);
+exit:
 	free(buf);
 }
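
As an aside on the arithmetic in the test above (illustration only, not part of the commit): user space seeds CPU i's slot with the value i, so each bpf_loop() pass in the BPF program should accumulate 0 + 1 + ... + (nr_cpus - 1), the Gauss sum that the ASSERT_EQ() checks compare against. A hypothetical helper spelling that out:

#include <linux/types.h>

/* Expected per-map sum when CPU i's slot holds the value i; equivalent to
 * "sum = (nr_cpus - 1) * nr_cpus / 2" in the test above.
 */
static __u64 expected_percpu_sum(int nr_cpus)
{
	return (__u64)nr_cpus * (nr_cpus - 1) / 2;
}
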
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2022 Bytedance
+/* Copyright (c) 2022 Bytedance */
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 
-int percpu_array_elem_val = 0;
-int percpu_hash_elem_val = 0;
-int percpu_lru_hash_elem_val = 0;
+__u64 percpu_array_elem_sum = 0;
+__u64 percpu_hash_elem_sum = 0;
+__u64 percpu_lru_hash_elem_sum = 0;
+const volatile int nr_cpus;
 const volatile int my_pid;
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(max_entries, 1);
 	__type(key, __u32);
-	__type(value, __u32);
+	__type(value, __u64);
 } percpu_array_map SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, __u32);
+	__type(key, __u64);
+	__type(value, __u64);
 } percpu_hash_map SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
 	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, __u32);
+	__type(key, __u64);
+	__type(value, __u64);
 } percpu_lru_hash_map SEC(".maps");
 
+struct read_percpu_elem_ctx {
+	void *map;
+	__u64 sum;
+};
+
+static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
+{
+	__u64 key = 0;
+	__u64 *value;
+
+	value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
+	if (value)
+		ctx->sum += *value;
+	return 0;
+}
+
 SEC("tp/syscalls/sys_enter_getuid")
 int sysenter_getuid(const void *ctx)
 {
-	__u32 key = 0;
-	__u32 cpu = 0;
-	__u32 *value;
+	struct read_percpu_elem_ctx map_ctx;
 
 	if (my_pid != (bpf_get_current_pid_tgid() >> 32))
 		return 0;
 
-	value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
-	if (value)
-		percpu_array_elem_val = *value;
+	map_ctx.map = &percpu_array_map;
+	map_ctx.sum = 0;
+	bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+	percpu_array_elem_sum = map_ctx.sum;
 
-	value = bpf_map_lookup_percpu_elem(&percpu_hash_map, &key, cpu);
-	if (value)
-		percpu_hash_elem_val = *value;
+	map_ctx.map = &percpu_hash_map;
+	map_ctx.sum = 0;
+	bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+	percpu_hash_elem_sum = map_ctx.sum;
 
-	value = bpf_map_lookup_percpu_elem(&percpu_lru_hash_map, &key, cpu);
-	if (value)
-		percpu_lru_hash_elem_val = *value;
+	map_ctx.map = &percpu_lru_hash_map;
+	map_ctx.sum = 0;
+	bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+	percpu_lru_hash_elem_sum = map_ctx.sum;
 
 	return 0;
 }
...
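
To round out the BPF side (a sketch under the same vmlinux.h/bpf_helpers.h assumptions as the program above, not part of the commit): bpf_loop(nr_loops, callback, ctx, flags) calls callback(i, ctx) for i = 0..nr_loops-1, where a callback return of 0 continues and 1 stops the loop early, and bpf_map_lookup_percpu_elem() returns NULL for an invalid CPU index, which is why bounding the loop by the nr_cpus rodata value is enough. The callback below is invented for illustration; it shows the early-stop contract with a made-up threshold field.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct sum_ctx {
	void *map;	/* any per-CPU map with __u64 values */
	__u64 sum;
	__u64 limit;	/* hypothetical early-stop threshold */
};

/* bpf_loop() callback: sum per-CPU values and stop once the running sum
 * reaches the threshold. Returning 1 stops bpf_loop(); 0 continues.
 */
static int sum_until_limit(__u32 cpu, struct sum_ctx *ctx)
{
	__u64 key = 0;
	__u64 *v = bpf_map_lookup_percpu_elem(ctx->map, &key, cpu);

	if (v)
		ctx->sum += *v;
	return ctx->sum >= ctx->limit ? 1 : 0;
}
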