Commit 85fbd233 authored by Daniel Borkmann

Merge branch 'bpf-light-skel'

Alexei Starovoitov says:

====================
libbpf performs a set of complex operations to load BPF programs.
With the "loader program" and "CO-RE in the kernel" features, libbpf's
loading job has diminished. The light skeleton became lean enough to
perform program loading and map creation tasks without libbpf.
It's now possible to tweak it further to make the light skeleton usable
both from user space and from a kernel module.
This allows bpf_preload.ko to drop its user-mode-driver usage, drop the
host compiler dependency, allow cross compilation, and simplify the code.
It's a building block toward safe and portable kernel modules.

v3->v4:
- inlined skel_prep_init_value() as direct assignment in lskel

v2->v3:
- dropped vm_mmap() and switched to the bpf_loader_ctx->flags & BPF_SKEL_KERNEL approach.
  It allows bpf_preload.ko to be built-in.
  The kernel is able to load BPF progs before the init process starts.
- added comments (Yonghong's review)
- added error checks in lskel (Andrii's review)
- added Acks in all but 2nd patch.

v1->v2:
- removed redundant anon struct and added comments (Andrii's review)
- added Yonghong's ack
- fixed build warning when JIT is off
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 4f5e483b cb80ddc6
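
For orientation before the diff: a condensed sketch of the usage pattern this
series enables, mirroring the rewritten bpf_preload_kern.c below (a sketch with
abbreviated error handling, not the exact upstream code):

#include <linux/init.h>
#include <linux/module.h>
#include "iterators/iterators.lskel.h" /* generated by: bpftool gen skeleton -L */

static struct iterators_bpf *skel;

static int __init load(void)
{
	int err;

	skel = iterators_bpf__open();      /* kzalloc()-backed under __KERNEL__ */
	if (!skel)
		return -ENOMEM;
	err = iterators_bpf__load(skel);   /* runs the loader prog via bpf_sys_bpf() */
	if (!err)
		err = iterators_bpf__attach(skel);
	if (err)
		iterators_bpf__destroy(skel);
	return err;
}

static void __exit fini(void)
{
	iterators_bpf__destroy(skel);
}
late_initcall(load);
module_exit(fini);
MODULE_LICENSE("GPL");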
@@ -710,11 +710,10 @@ static DEFINE_MUTEX(bpf_preload_lock);
static int populate_bpffs(struct dentry *parent)
{
struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
int err = 0, i;
/* grab the mutex to make sure the kernel interactions with bpf_preload
* UMD are serialized
* are serialized
*/
mutex_lock(&bpf_preload_lock);
@@ -722,40 +721,22 @@ static int populate_bpffs(struct dentry *parent)
if (!bpf_preload_mod_get())
goto out;
if (!bpf_preload_ops->info.tgid) {
/* preload() will start UMD that will load BPF iterator programs */
err = bpf_preload_ops->preload(objs);
if (err)
err = bpf_preload_ops->preload(objs);
if (err)
goto out_put;
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
bpf_link_inc(objs[i].link);
err = bpf_iter_link_pin_kernel(parent,
objs[i].link_name, objs[i].link);
if (err) {
bpf_link_put(objs[i].link);
goto out_put;
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
links[i] = bpf_link_by_id(objs[i].link_id);
if (IS_ERR(links[i])) {
err = PTR_ERR(links[i]);
goto out_put;
}
}
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
err = bpf_iter_link_pin_kernel(parent,
objs[i].link_name, links[i]);
if (err)
goto out_put;
/* do not unlink successfully pinned links even
* if later link fails to pin
*/
links[i] = NULL;
}
/* finish() will tell UMD process to exit */
err = bpf_preload_ops->finish();
if (err)
goto out_put;
}
out_put:
bpf_preload_mod_put();
out:
mutex_unlock(&bpf_preload_lock);
for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
if (!IS_ERR_OR_NULL(links[i]))
bpf_link_put(links[i]);
return err;
}
@@ -18,10 +18,9 @@ menuconfig BPF_PRELOAD
if BPF_PRELOAD
config BPF_PRELOAD_UMD
tristate "bpf_preload kernel module with user mode driver"
depends on CC_CAN_LINK
depends on m || CC_CAN_LINK_STATIC
tristate "bpf_preload kernel module"
default m
help
This builds bpf_preload kernel module with embedded user mode driver.
This builds bpf_preload kernel module with embedded BPF programs for
introspection in bpffs.
endif
@@ -3,16 +3,6 @@
LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
LIBBPF_INCLUDE = $(LIBBPF_SRCS)/..
userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
-I $(LIBBPF_INCLUDE) -Wno-unused-result
userprogs := bpf_preload_umd
bpf_preload_umd-objs := iterators/iterators.o
$(obj)/bpf_preload_umd:
$(obj)/bpf_preload_umd_blob.o: $(obj)/bpf_preload_umd
obj-$(CONFIG_BPF_PRELOAD_UMD) += bpf_preload.o
bpf_preload-objs += bpf_preload_kern.o bpf_preload_umd_blob.o
CFLAGS_bpf_preload_kern.o += -I $(LIBBPF_INCLUDE)
bpf_preload-objs += bpf_preload_kern.o
@@ -2,13 +2,13 @@
#ifndef _BPF_PRELOAD_H
#define _BPF_PRELOAD_H
#include <linux/usermode_driver.h>
#include "iterators/bpf_preload_common.h"
struct bpf_preload_info {
char link_name[16];
struct bpf_link *link;
};
struct bpf_preload_ops {
struct umd_info info;
int (*preload)(struct bpf_preload_info *);
int (*finish)(void);
struct module *owner;
};
extern struct bpf_preload_ops *bpf_preload_ops;
@@ -2,101 +2,80 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include "bpf_preload.h"
#include "iterators/iterators.lskel.h"
extern char bpf_preload_umd_start;
extern char bpf_preload_umd_end;
static struct bpf_link *maps_link, *progs_link;
static struct iterators_bpf *skel;
static int preload(struct bpf_preload_info *obj);
static int finish(void);
static void free_links_and_skel(void)
{
if (!IS_ERR_OR_NULL(maps_link))
bpf_link_put(maps_link);
if (!IS_ERR_OR_NULL(progs_link))
bpf_link_put(progs_link);
iterators_bpf__destroy(skel);
}
static int preload(struct bpf_preload_info *obj)
{
strlcpy(obj[0].link_name, "maps.debug", sizeof(obj[0].link_name));
obj[0].link = maps_link;
strlcpy(obj[1].link_name, "progs.debug", sizeof(obj[1].link_name));
obj[1].link = progs_link;
return 0;
}
static struct bpf_preload_ops umd_ops = {
.info.driver_name = "bpf_preload",
static struct bpf_preload_ops ops = {
.preload = preload,
.finish = finish,
.owner = THIS_MODULE,
};
static int preload(struct bpf_preload_info *obj)
static int load_skel(void)
{
int magic = BPF_PRELOAD_START;
loff_t pos = 0;
int i, err;
ssize_t n;
int err;
err = fork_usermode_driver(&umd_ops.info);
skel = iterators_bpf__open();
if (!skel)
return -ENOMEM;
err = iterators_bpf__load(skel);
if (err)
return err;
/* send the start magic to let UMD proceed with loading BPF progs */
n = kernel_write(umd_ops.info.pipe_to_umh,
&magic, sizeof(magic), &pos);
if (n != sizeof(magic))
return -EPIPE;
/* receive bpf_link IDs and names from UMD */
pos = 0;
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
n = kernel_read(umd_ops.info.pipe_from_umh,
&obj[i], sizeof(*obj), &pos);
if (n != sizeof(*obj))
return -EPIPE;
goto out;
err = iterators_bpf__attach(skel);
if (err)
goto out;
maps_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
if (IS_ERR(maps_link)) {
err = PTR_ERR(maps_link);
goto out;
}
return 0;
}
static int finish(void)
{
int magic = BPF_PRELOAD_END;
struct pid *tgid;
loff_t pos = 0;
ssize_t n;
/* send the last magic to UMD. It will do a normal exit. */
n = kernel_write(umd_ops.info.pipe_to_umh,
&magic, sizeof(magic), &pos);
if (n != sizeof(magic))
return -EPIPE;
tgid = umd_ops.info.tgid;
if (tgid) {
wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
umd_cleanup_helper(&umd_ops.info);
progs_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
if (IS_ERR(progs_link)) {
err = PTR_ERR(progs_link);
goto out;
}
return 0;
out:
free_links_and_skel();
return err;
}
static int __init load_umd(void)
static int __init load(void)
{
int err;
err = umd_load_blob(&umd_ops.info, &bpf_preload_umd_start,
&bpf_preload_umd_end - &bpf_preload_umd_start);
err = load_skel();
if (err)
return err;
bpf_preload_ops = &umd_ops;
bpf_preload_ops = &ops;
return err;
}
static void __exit fini_umd(void)
static void __exit fini(void)
{
struct pid *tgid;
bpf_preload_ops = NULL;
/* kill UMD in case it's still there due to earlier error */
tgid = umd_ops.info.tgid;
if (tgid) {
kill_pid(tgid, SIGKILL, 1);
wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
umd_cleanup_helper(&umd_ops.info);
}
umd_unload_blob(&umd_ops.info);
free_links_and_skel();
}
late_initcall(load_umd);
module_exit(fini_umd);
late_initcall(load);
module_exit(fini);
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
.section .init.rodata, "a"
.global bpf_preload_umd_start
bpf_preload_umd_start:
.incbin "kernel/bpf/preload/bpf_preload_umd"
.global bpf_preload_umd_end
bpf_preload_umd_end:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_PRELOAD_COMMON_H
#define _BPF_PRELOAD_COMMON_H
#define BPF_PRELOAD_START 0x5555
#define BPF_PRELOAD_END 0xAAAA
struct bpf_preload_info {
char link_name[16];
int link_id;
};
#endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/resource.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <sys/mount.h>
#include "iterators.lskel.h"
#include "bpf_preload_common.h"
int to_kernel = -1;
int from_kernel = 0;
static int __bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
union bpf_attr attr;
int err;
memset(&attr, 0, sizeof(attr));
attr.info.bpf_fd = bpf_fd;
attr.info.info_len = *info_len;
attr.info.info = (long) info;
err = skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
if (!err)
*info_len = attr.info.info_len;
return err;
}
static int send_link_to_kernel(int link_fd, const char *link_name)
{
struct bpf_preload_info obj = {};
struct bpf_link_info info = {};
__u32 info_len = sizeof(info);
int err;
err = __bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
if (err)
return err;
obj.link_id = info.id;
if (strlen(link_name) >= sizeof(obj.link_name))
return -E2BIG;
strcpy(obj.link_name, link_name);
if (write(to_kernel, &obj, sizeof(obj)) != sizeof(obj))
return -EPIPE;
return 0;
}
int main(int argc, char **argv)
{
struct iterators_bpf *skel;
int err, magic;
int debug_fd;
debug_fd = open("/dev/console", O_WRONLY | O_NOCTTY | O_CLOEXEC);
if (debug_fd < 0)
return 1;
to_kernel = dup(1);
close(1);
dup(debug_fd);
/* now stdout points to /dev/console */
read(from_kernel, &magic, sizeof(magic));
if (magic != BPF_PRELOAD_START) {
printf("bad start magic %d\n", magic);
return 1;
}
/* libbpf opens BPF object and loads it into the kernel */
skel = iterators_bpf__open_and_load();
if (!skel) {
/* iterators.skel.h is little endian.
* libbpf doesn't support automatic little->big conversion
* of BPF bytecode yet.
* The program load will fail in such case.
*/
printf("Failed load could be due to wrong endianness\n");
return 1;
}
err = iterators_bpf__attach(skel);
if (err)
goto cleanup;
/* send two bpf_link IDs with names to the kernel */
err = send_link_to_kernel(skel->links.dump_bpf_map_fd, "maps.debug");
if (err)
goto cleanup;
err = send_link_to_kernel(skel->links.dump_bpf_prog_fd, "progs.debug");
if (err)
goto cleanup;
/* The kernel will proceed with pinning the links in bpffs.
* UMD will wait on read from pipe.
*/
read(from_kernel, &magic, sizeof(magic));
if (magic != BPF_PRELOAD_END) {
printf("bad final magic %d\n", magic);
err = -EINVAL;
}
cleanup:
iterators_bpf__destroy(skel);
return err != 0;
}
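
To summarize the handshake retired above (implemented by the deleted preload()
and finish() on the kernel side and by this main() in the UMD):

   kernel                              UMD
   write BPF_PRELOAD_START        -->  read magic; open, load, attach skeleton
   read BPF_PRELOAD_LINKS objs   <--   write {link_name, link_id} per link
   pin the links in bpffs
   write BPF_PRELOAD_END          -->  read magic; destroy skeleton and exit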
@@ -985,6 +985,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
return map;
}
EXPORT_SYMBOL(bpf_map_get);
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
@@ -2489,6 +2490,7 @@ void bpf_link_put(struct bpf_link *link)
bpf_link_free(link);
}
}
EXPORT_SYMBOL(bpf_link_put);
static int bpf_link_release(struct inode *inode, struct file *filp)
{
@@ -2631,6 +2633,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);
struct bpf_tracing_link {
struct bpf_link link;
@@ -4756,23 +4759,52 @@ static bool syscall_prog_is_valid_access(int off, int size,
return true;
}
BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
struct bpf_prog * __maybe_unused prog;
switch (cmd) {
case BPF_MAP_CREATE:
case BPF_MAP_UPDATE_ELEM:
case BPF_MAP_FREEZE:
case BPF_PROG_LOAD:
case BPF_BTF_LOAD:
case BPF_LINK_CREATE:
case BPF_RAW_TRACEPOINT_OPEN:
break;
/* case BPF_PROG_TEST_RUN:
* is not part of this list to prevent recursive test_run
*/
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
case BPF_PROG_TEST_RUN:
if (attr->test.data_in || attr->test.data_out ||
attr->test.ctx_out || attr->test.duration ||
attr->test.repeat || attr->test.flags)
return -EINVAL;
prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
if (IS_ERR(prog))
return PTR_ERR(prog);
if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
attr->test.ctx_size_in > U16_MAX) {
bpf_prog_put(prog);
return -EINVAL;
}
if (!__bpf_prog_enter_sleepable(prog)) {
/* recursion detected */
bpf_prog_put(prog);
return -EBUSY;
}
attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */);
bpf_prog_put(prog);
return 0;
#endif
default:
return -EINVAL;
}
return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
EXPORT_SYMBOL(bpf_sys_bpf);
static const struct bpf_func_proto bpf_sys_bpf_proto = {
.func = bpf_sys_bpf,
@@ -472,7 +472,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
continue;
if (bpf_map__is_internal(map) &&
(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
printf("\tmunmap(skel->%1$s, %2$zd);\n",
printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
ident, bpf_map_mmap_sz(map));
codegen("\
\n\
@@ -481,7 +481,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
}
codegen("\
\n\
free(skel); \n\
skel_free(skel); \n\
} \n\
",
obj_name);
@@ -525,7 +525,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
{ \n\
struct %1$s *skel; \n\
\n\
skel = calloc(sizeof(*skel), 1); \n\
skel = skel_alloc(sizeof(*skel)); \n\
if (!skel) \n\
goto cleanup; \n\
skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
@@ -543,19 +543,18 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
continue;
codegen("\
\n\
skel->%1$s = \n\
mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\
MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\
if (skel->%1$s == (void *) -1) \n\
goto cleanup; \n\
memcpy(skel->%1$s, (void *)\"\\ \n\
", ident, bpf_map_mmap_sz(map));
\n\
skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
", ident);
mmap_data = bpf_map__initial_value(map, &mmap_size);
print_hex(mmap_data, mmap_size);
printf("\", %2$zd);\n"
"\tskel->maps.%1$s.initial_value = (__u64)(long)skel->%1$s;\n",
ident, mmap_size);
codegen("\
\n\
\", %1$zd, %2$zd); \n\
if (!skel->%3$s) \n\
goto cleanup; \n\
skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
", bpf_map_mmap_sz(map), mmap_size, ident);
}
codegen("\
\n\
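
For illustration, the prep template above expands to something like the
following in a generated lskel, assuming a map named rodata with hypothetical
sizes (hex payload elided):

	skel->rodata = skel_prep_map_data((void *)"\
\x47\x50\x4c\x00 ...elided... \
", 8192, 4096);
	if (!skel->rodata)
		goto cleanup;
	skel->maps.rodata.initial_value = (__u64) (long) skel->rodata;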
@@ -611,9 +610,13 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
else
mmap_flags = "PROT_READ | PROT_WRITE";
printf("\tskel->%1$s =\n"
"\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n"
"\t\t\tskel->maps.%1$s.map_fd, 0);\n",
codegen("\
\n\
skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
if (!skel->%1$s) \n\
return -ENOMEM; \n\
",
ident, bpf_map_mmap_sz(map), mmap_flags);
}
codegen("\
@@ -751,8 +754,6 @@ static int do_skeleton(int argc, char **argv)
#ifndef %2$s \n\
#define %2$s \n\
\n\
#include <stdlib.h> \n\
#include <bpf/bpf.h> \n\
#include <bpf/skel_internal.h> \n\
\n\
struct %1$s { \n\
@@ -1043,18 +1043,27 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
value = add_data(gen, pvalue, value_size);
key = add_data(gen, &zero, sizeof(zero));
/* if (map_desc[map_idx].initial_value)
* copy_from_user(value, initial_value, value_size);
/* if (map_desc[map_idx].initial_value) {
* if (ctx->flags & BPF_SKEL_KERNEL)
* bpf_probe_read_kernel(value, value_size, initial_value);
* else
* bpf_copy_from_user(value, value_size, initial_value);
* }
*/
emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
sizeof(struct bpf_loader_ctx) +
sizeof(struct bpf_map_desc) * map_idx +
offsetof(struct bpf_map_desc, initial_value)));
emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, value));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
offsetof(struct bpf_loader_ctx, flags)));
emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
map_update_attr = add_data(gen, &attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
@@ -3,9 +3,19 @@
#ifndef __SKEL_INTERNAL_H
#define __SKEL_INTERNAL_H
#ifdef __KERNEL__
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <stdlib.h>
#include "bpf.h"
#endif
#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
@@ -25,24 +35,23 @@
* requested during loader program generation.
*/
struct bpf_map_desc {
union {
/* input for the loader prog */
struct {
__aligned_u64 initial_value;
__u32 max_entries;
};
/* output of the loader prog */
struct {
int map_fd;
};
};
/* output of the loader prog */
int map_fd;
/* input for the loader prog */
__u32 max_entries;
__aligned_u64 initial_value;
};
struct bpf_prog_desc {
int prog_fd;
};
enum {
BPF_SKEL_KERNEL = (1ULL << 0),
};
struct bpf_loader_ctx {
size_t sz;
__u32 sz;
__u32 flags;
__u32 log_level;
__u32 log_size;
__u64 log_buf;
@@ -57,12 +66,144 @@ struct bpf_load_and_run_opts {
const char *errstr;
};
long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
unsigned int size)
{
#ifdef __KERNEL__
return bpf_sys_bpf(cmd, attr, size);
#else
return syscall(__NR_bpf, cmd, attr, size);
#endif
}
#ifdef __KERNEL__
static inline int close(int fd)
{
return close_fd(fd);
}
static inline void *skel_alloc(size_t size)
{
struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);
if (!ctx)
return NULL;
ctx->flags |= BPF_SKEL_KERNEL;
return ctx;
}
static inline void skel_free(const void *p)
{
kfree(p);
}
/* skel->bss/rodata maps are populated the following way:
*
* For kernel use:
* skel_prep_map_data() allocates kernel memory that a kernel module can directly access.
* Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
* skel_finalize_map_data() sets skel->rodata to point to the actual value in a bpf map and
* sets maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
* is not necessary.
*
* For user space:
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
* Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
* The loader program will perform copy_from_user() from maps.rodata.initial_value.
* skel_finalize_map_data() remaps bpf array map value from the kernel memory into
* skel->rodata address.
*
* The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
* both kernel and user space. The generated loader program does
* either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
* depending on bpf_loader_ctx->flags.
*/
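/* Putting the above together, a sketch (not verbatim generated code) of how
 * a generated lskel drives these helpers for, e.g., its rodata map:
 *
 *   skel->rodata = skel_prep_map_data(blob, mmap_sz, val_sz);
 *   skel->maps.rodata.initial_value = (__u64) (long) skel->rodata;
 *   ... bpf_load_and_run() executes the loader program ...
 *   skel->rodata = skel_finalize_map_data(&skel->maps.rodata.initial_value,
 *                                         mmap_sz, flags, skel->maps.rodata.map_fd);
 *   ... and on destroy:
 *   skel_free_map_data(skel->rodata, skel->maps.rodata.initial_value, mmap_sz);
 */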
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
if (addr != ~0ULL)
kvfree(p);
/* When addr == ~0ULL the 'p' points to
* ((struct bpf_array *)map)->value. See skel_finalize_map_data.
*/
}
static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
void *addr;
addr = kvmalloc(val_sz, GFP_KERNEL);
if (!addr)
return NULL;
memcpy(addr, val, val_sz);
return addr;
}
static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
struct bpf_map *map;
void *addr = NULL;
kvfree((void *) (long) *init_val);
*init_val = ~0ULL;
/* At this point bpf_load_and_run() finished without error and
* 'fd' is a valid bpf map FD. All sanity checks below should succeed.
*/
map = bpf_map_get(fd);
if (IS_ERR(map))
return NULL;
if (map->map_type != BPF_MAP_TYPE_ARRAY)
goto out;
addr = ((struct bpf_array *)map)->value;
/* the addr stays valid, since FD is not closed */
out:
bpf_map_put(map);
return addr;
}
#else
static inline void *skel_alloc(size_t size)
{
return calloc(1, size);
}
static inline void skel_free(void *p)
{
free(p);
}
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
munmap(p, sz);
}
static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
void *addr;
addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (addr == (void *) -1)
return NULL;
memcpy(addr, val, val_sz);
return addr;
}
static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
void *addr;
addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
if (addr == (void *) -1)
return NULL;
return addr;
}
#endif
static inline int skel_closenz(int fd)
{
if (fd > 0)
@@ -136,22 +277,28 @@ static inline int skel_link_create(int prog_fd, int target_fd,
return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
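/* Under __KERNEL__ bpf_sys_bpf() returns a negative error code directly, so
 * 'err' (assigned from the command's return value) already holds it and
 * set_err is a no-op; in user space the syscall returns -1 and the error has
 * to be taken from errno.
 */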
#ifdef __KERNEL__
#define set_err
#else
#define set_err err = -errno
#endif
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr;
map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
if (map_fd < 0) {
opts->errstr = "failed to create loader map";
err = -errno;
set_err;
goto out;
}
err = skel_map_update_elem(map_fd, &key, opts->data, 0);
if (err < 0) {
opts->errstr = "failed to update loader map";
err = -errno;
set_err;
goto out;
}
@@ -166,10 +313,10 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
attr.log_size = opts->ctx->log_size;
attr.log_buf = opts->ctx->log_buf;
attr.prog_flags = BPF_F_SLEEPABLE;
prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
if (prog_fd < 0) {
opts->errstr = "failed to load loader prog";
err = -errno;
set_err;
goto out;
}
@@ -181,10 +328,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
if (err < 0 || (int)attr.test.retval < 0) {
opts->errstr = "failed to execute loader prog";
if (err < 0) {
err = -errno;
set_err;
} else {
err = (int)attr.test.retval;
#ifndef __KERNEL__
errno = -err;
#endif
}
goto out;
}