Commit 92d3bff2 authored by Andrii Nakryiko

Merge branch 'bpf/selftests: page size fixes'

Yauheni Kaliuta says:

====================

A set of fixes to make the selftests work on systems with PAGE_SIZE > 4K,
plus a cleanup (dropping the unneeded 'version' section) and a
ringbuf_multi extension.
---
v3->v4:
- zero-initialize BPF programs' static variables;
- add bpf_map__inner_map to libbpf.map in alphabetical order;
- add a bpf_map__set_inner_map_fd test to ringbuf_multi;

v2->v3:
- reorder: move the version-removal patch first to keep the main
  patches in one group;
- rename "selftests/bpf: pass page size from userspace in sockopt_sk"
  as suggested;
- convert the sockopt_sk test to use ASSERT macros;
- set the page size from userspace;
- split the patches into userspace/bpf pairs; this makes it easier to
  check that every conversion works as expected;

v1->v2:
- add the missing 'selftests/bpf: test_progs/sockopt_sk: Convert to use
  BPF skeleton' patch
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 957dca3d cfc0889c
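Every test in the series follows the same conversion pattern: open the
skeleton instead of open_and_load, resize any page-size-dependent map
while the object is still open, load it, and then publish the page size
to the BPF side through a global variable. A minimal sketch of that
flow, assuming a hypothetical skeleton named "example" with a ring
buffer map and a page_size global (not part of this series):

#include <unistd.h>
#include <bpf/libbpf.h>
#include "example.skel.h"	/* hypothetical skeleton header */

static int run(void)
{
	struct example *skel;
	int page_size = getpagesize();
	int err;

	skel = example__open();		/* open only: maps not created yet */
	if (!skel)
		return -1;

	/* a ring buffer's size must be a multiple of the page size */
	err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
	if (err)
		goto out;

	err = example__load(skel);	/* maps get created here */
	if (err)
		goto out;

	/* mirror the page size into the program's .bss */
	skel->bss->page_size = page_size;
out:
	example__destroy(skel);
	return err;
}

Resizing has to happen between open and load because map properties are
frozen once the maps exist in the kernel (bpf_map__set_max_entries
rejects maps whose fd is already set, as the diff below shows).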
@@ -2194,6 +2194,7 @@ static int parse_btf_map_def(struct bpf_object *obj,
 		map->inner_map = calloc(1, sizeof(*map->inner_map));
 		if (!map->inner_map)
 			return -ENOMEM;
+		map->inner_map->fd = -1;
 		map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
 		map->inner_map->name = malloc(strlen(map->name) +
 					      sizeof(".inner") + 1);
@@ -3845,6 +3846,14 @@ __u32 bpf_map__max_entries(const struct bpf_map *map)
 	return map->def.max_entries;
 }

+struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
+{
+	if (!bpf_map_type__is_map_in_map(map->def.type))
+		return NULL;
+
+	return map->inner_map;
+}
+
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
 	if (map->fd >= 0)
@@ -9476,6 +9485,7 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
 		pr_warn("error: inner_map_fd already specified\n");
 		return -EINVAL;
 	}
+	zfree(&map->inner_map);
 	map->inner_map_fd = fd;
 	return 0;
 }
...
@@ -480,6 +480,7 @@ LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
 LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);

 LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
+LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

 LIBBPF_API long libbpf_get_error(const void *ptr);
...
@@ -359,5 +359,6 @@ LIBBPF_0.4.0 {
 		bpf_linker__finalize;
 		bpf_linker__free;
 		bpf_linker__new;
+		bpf_map__inner_map;
 		bpf_object__set_kversion;
 } LIBBPF_0.3.0;
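The new bpf_map__inner_map() accessor complements the existing
bpf_map__set_inner_map_fd(): the accessor exposes the BTF-declared
inner map definition so it can be tuned before load, while
set_inner_map_fd lets userspace hand over its own prototype map fd,
discarding any declared inner map (hence the new zfree() above). A
sketch of both paths, reusing the skeleton and map names from the
ringbuf_multi test further down; the helper itself is illustrative, not
part of the series:

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_ringbuf_multi.skel.h"

/* resize both flavours of inner ring buffer before the skeleton is
 * loaded; on success, returns the prototype fd for the caller to
 * close once loading is done */
static int size_inner_ringbufs(struct test_ringbuf_multi *skel)
{
	int page_size = getpagesize();
	int proto_fd, err;

	/* ringbuf_arr declares its inner map in BTF: resize it in place */
	err = bpf_map__set_max_entries(
			bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
	if (err)
		return err;

	/* ringbuf_hash: override its declared inner map with a
	 * user-created prototype of the right size */
	proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0);
	if (proto_fd < 0)
		return proto_fd;

	err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
	if (err) {
		close(proto_fd);
		return err;
	}
	return proto_fd;
}

The ringbuf_multi test below inlines these steps and closes the
prototype fd as soon as the skeleton is loaded, since libbpf only needs
it during map creation.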
@@ -12,11 +12,22 @@ void test_map_ptr(void)
 	__u32 duration = 0, retval;
 	char buf[128];
 	int err;
+	int page_size = getpagesize();

-	skel = map_ptr_kern__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
+	skel = map_ptr_kern__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size);
+	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+		goto cleanup;
+
+	err = map_ptr_kern__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	skel->bss->page_size = page_size;
+
 	err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
 				sizeof(pkt_v4), buf, NULL, &retval, NULL);
...
@@ -29,22 +29,36 @@ void test_mmap(void)
 	struct test_mmap *skel;
 	__u64 val = 0;

-	skel = test_mmap__open_and_load();
-	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+	skel = test_mmap__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	/* at least 4 pages of data */
+	err = bpf_map__set_max_entries(skel->maps.data_map,
+				       4 * (page_size / sizeof(u64)));
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = test_mmap__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
 	bss_map = skel->maps.bss;
 	data_map = skel->maps.data_map;
 	data_map_fd = bpf_map__fd(data_map);

 	rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
-	tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
-		munmap(tmp1, 4096);
+		munmap(tmp1, page_size);
 		goto cleanup;
 	}
 	/* now double-check if it's mmap()'able at all */
-	tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
 		goto cleanup;
...
@@ -87,11 +87,20 @@ void test_ringbuf(void)
 	pthread_t thread;
 	long bg_ret = -1;
 	int err, cnt;
+	int page_size = getpagesize();

-	skel = test_ringbuf__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+	skel = test_ringbuf__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = test_ringbuf__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
 	/* only trigger BPF program for current process */
 	skel->bss->pid = getpid();
@@ -110,9 +119,9 @@ void test_ringbuf(void)
 	CHECK(skel->bss->avail_data != 3 * rec_sz,
 	      "err_avail_size", "exp %ld, got %ld\n",
 	      3L * rec_sz, skel->bss->avail_data);
-	CHECK(skel->bss->ring_size != 4096,
+	CHECK(skel->bss->ring_size != page_size,
 	      "err_ring_size", "exp %ld, got %ld\n",
-	      4096L, skel->bss->ring_size);
+	      (long)page_size, skel->bss->ring_size);
 	CHECK(skel->bss->cons_pos != 0,
 	      "err_cons_pos", "exp %ld, got %ld\n",
 	      0L, skel->bss->cons_pos);
...
@@ -41,13 +41,42 @@ static int process_sample(void *ctx, void *data, size_t len)
 void test_ringbuf_multi(void)
 {
 	struct test_ringbuf_multi *skel;
-	struct ring_buffer *ringbuf;
+	struct ring_buffer *ringbuf = NULL;
 	int err;
+	int page_size = getpagesize();
+	int proto_fd = -1;

-	skel = test_ringbuf_multi__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+	skel = test_ringbuf_multi__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0);
+	if (CHECK(proto_fd == -1, "bpf_create_map", "bpf_create_map failed\n"))
+		goto cleanup;
+
+	err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
+	if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
+		goto cleanup;
+
+	err = test_ringbuf_multi__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
+	close(proto_fd);
+	proto_fd = -1;
+
 	/* only trigger BPF program for current process */
 	skel->bss->pid = getpid();
@@ -97,6 +126,8 @@ void test_ringbuf_multi(void)
 	      2L, skel->bss->total);

 cleanup:
+	if (proto_fd >= 0)
+		close(proto_fd);
 	ring_buffer__free(ringbuf);
 	test_ringbuf_multi__destroy(skel);
 }
@@ -3,6 +3,7 @@
 #include "cgroup_helpers.h"

 #include <linux/tcp.h>
+#include "sockopt_sk.skel.h"

 #ifndef SOL_TCP
 #define SOL_TCP IPPROTO_TCP
@@ -191,60 +192,30 @@ static int getsetsockopt(void)
 	return -1;
 }

-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
-
-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err) {
-		log_err("Failed to deduct types for %s BPF program", title);
-		return -1;
-	}
-
-	prog = bpf_object__find_program_by_title(obj, title);
-	if (!prog) {
-		log_err("Failed to find %s BPF program", title);
-		return -1;
-	}
-
-	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
-			      attach_type, 0);
-	if (err) {
-		log_err("Failed to attach %s BPF program", title);
-		return -1;
-	}
-
-	return 0;
-}
-
 static void run_test(int cgroup_fd)
 {
-	struct bpf_prog_load_attr attr = {
-		.file = "./sockopt_sk.o",
-	};
-	struct bpf_object *obj;
-	int ignored;
-	int err;
+	struct sockopt_sk *skel;

-	err = bpf_prog_load_xattr(&attr, &obj, &ignored);
-	if (CHECK_FAIL(err))
-		return;
+	skel = sockopt_sk__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;

-	err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt");
-	if (CHECK_FAIL(err))
-		goto close_bpf_object;
+	skel->bss->page_size = getpagesize();

-	err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt");
-	if (CHECK_FAIL(err))
-		goto close_bpf_object;
+	skel->links._setsockopt =
+		bpf_program__attach_cgroup(skel->progs._setsockopt, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links._setsockopt, "setsockopt_link"))
+		goto cleanup;

-	CHECK_FAIL(getsetsockopt());
+	skel->links._getsockopt =
+		bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
+		goto cleanup;

-close_bpf_object:
-	bpf_object__close(obj);
+	ASSERT_OK(getsetsockopt(), "getsetsockopt");
+
+cleanup:
+	sockopt_sk__destroy(skel);
 }

 void test_sockopt_sk(void)
...
@@ -12,6 +12,7 @@ _Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

 enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
 __u32 g_line = 0;
+int page_size = 0; /* userspace should set it */

 #define VERIFY_TYPE(type, func) ({	\
 	g_map_type = type;		\
@@ -635,7 +636,6 @@ struct bpf_ringbuf_map {

 struct {
 	__uint(type, BPF_MAP_TYPE_RINGBUF);
-	__uint(max_entries, 1 << 12);
 } m_ringbuf SEC(".maps");

 static inline int check_ringbuf(void)
@@ -643,7 +643,7 @@ static inline int check_ringbuf(void)
 	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
 	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

-	VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
+	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

 	return 1;
 }
...
@@ -6,11 +6,8 @@
 #include <bpf/bpf_helpers.h>

 char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;

-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
+int page_size = 0; /* userspace should set it */

 #ifndef SOL_TCP
 #define SOL_TCP IPPROTO_TCP
@@ -90,7 +87,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 	 * program can only see the first PAGE_SIZE
 	 * bytes of data.
 	 */
-	if (optval_end - optval != PAGE_SIZE)
+	if (optval_end - optval != page_size)
 		return 0; /* EPERM, unexpected data size */

 	return 1;
@@ -161,7 +158,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
 		/* Original optlen is larger than PAGE_SIZE. */
-		if (ctx->optlen != PAGE_SIZE * 2)
+		if (ctx->optlen != page_size * 2)
 			return 0; /* EPERM, unexpected data size */

 		if (optval + 1 > optval_end)
@@ -175,7 +172,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	 * program can only see the first PAGE_SIZE
 	 * bytes of data.
 	 */
-	if (optval_end - optval != PAGE_SIZE)
+	if (optval_end - optval != page_size)
 		return 0; /* EPERM, unexpected data size */

 	return 1;
...
@@ -9,7 +9,6 @@ char _license[] SEC("license") = "GPL";

 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 4096);
 	__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
 	__type(key, __u32);
 	__type(value, char);
@@ -17,7 +16,6 @@ struct {

 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 512 * 4); /* at least 4 pages of data */
 	__uint(map_flags, BPF_F_MMAPABLE);
 	__type(key, __u32);
 	__type(value, __u64);
...
@@ -15,7 +15,6 @@ struct sample {

 struct {
 	__uint(type, BPF_MAP_TYPE_RINGBUF);
-	__uint(max_entries, 1 << 12);
 } ringbuf SEC(".maps");

 /* inputs */
...
@@ -15,7 +15,6 @@ struct sample {

 struct ringbuf_map {
 	__uint(type, BPF_MAP_TYPE_RINGBUF);
-	__uint(max_entries, 1 << 12);
 } ringbuf1 SEC(".maps"),
   ringbuf2 SEC(".maps");
@@ -31,6 +30,17 @@ struct {
 	},
 };

+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__array(values, struct ringbuf_map);
+} ringbuf_hash SEC(".maps") = {
+	.values = {
+		[0] = &ringbuf1,
+	},
+};
+
 /* inputs */
 int pid = 0;
 int target_ring = 0;
...