Commit 23e403b3 authored by Alexei Starovoitov

Merge branch 'BPF open-coded iterators'

Andrii Nakryiko says:

====================

Add support for open-coded (aka inline) iterators in the BPF world. This is
the next evolution of gradually allowing more powerful and less restrictive
looping and iteration capabilities in BPF programs.

We set up a framework for implementing all kinds of iterators (e.g., cgroup,
task, file, etc. iterators), but this patch set only implements a numbers
iterator, which is used to implement the ergonomic bpf_for() for-like
construct (see patches #4-#5). We also add bpf_for_each(), which is a generic
foreach-like construct that will work with any kind of open-coded iterator
implementation, as long as we stick with the bpf_iter_<type>_{new,next,destroy}()
naming pattern (which we now enforce on the kernel side).
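
For a quick feel of the end result, here's what a numbers loop looks like on
the BPF side with these macros (a sketch mirroring the selftests added later
in the series):

  __s64 sum = 0;
  int i;

  bpf_for(i, 0, 10)
      sum += i; /* verifier knows i is within [0, 10) */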

Patch #1 is preparatory refactoring for an easier way to check for special
kfunc calls. Patch #2 adds iterator kfunc registration and validation logic,
which is mostly independent from the rest of the open-coded iterator logic,
so it is separated out for easier reviewing.

The meat of the verifier-side logic is in patch #3. Patch #4 implements the
numbers iterator. I kept them separate to have a clean reference for how to
integrate new iterator types (now even simpler to do than in v1 of this patch
set). Patch #5 adds bpf_for(), bpf_for_each(), and bpf_repeat() macros to
bpf_misc.h, and also adds yet another pyperf test variant, now with a bpf_for()
loop. Patch #6 adds verification tests, based on the numbers iterator (as the
only one available right now). Patch #7 tests the actual runtime behavior of
the numbers iterator.

Finally, with the changes in v2, it's possible and trivial to implement custom
iterators completely in kernel modules, which we showcase and test by adding
to bpf_testmod a simple iterator that returns the same number a given number
of times. Patch #8 is where all this happens and is tested.

Most of the relevant details are in corresponding commit messages or code
comments.

v4->v5:
  - fix missed inner for() in is_iter_reg_valid_uninit, and fix return false
    (kernel test robot);
  - typo fixes and comment/commit description improvements throughout the
    patch set;
v3->v4:
  - remove unused variable from is_iter_reg_valid_init (kernel test robot);
v2->v3:
  - remove special kfunc leftovers for bpf_iter_num_{new,next,destroy};
  - add iters/testmod_seq* to DENYLIST.s390x, as s390x doesn't support kfuncs
    in modules yet (CI);
v1->v2:
  - rebased on latest, dropping previously landed preparatory patches;
  - each iterator type now has its own `struct bpf_iter_<type>`, which allows
    each iterator implementation to use exactly as much stack space as
    necessary, avoiding runtime allocations (Alexei);
  - reworked how iterator kfuncs are defined, so that no verifier changes are
    required when adding a new iterator type;
  - added bpf_testmod-based iterator implementation;
  - addressed the rest of the feedback: comments, commit message adjustments, etc.

Cc: Tejun Heo <tj@kernel.org>
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents ed69e066 7e86a8c4
@@ -1617,8 +1617,12 @@ struct bpf_array {
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33
/* Maximum number of loops for bpf_loop */
#define BPF_MAX_LOOPS BIT(23)
/* Maximum number of loops for bpf_loop and bpf_iter_num.
* It's an enum to expose it (and thus make it discoverable) through BTF.
*/
enum {
BPF_MAX_LOOPS = 8 * 1024 * 1024,
};
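/* note: 8 * 1024 * 1024 == 1 << 23 == BIT(23), so the limit's value is
 * unchanged; only its representation moved from a #define to an enum so that
 * it gets emitted into BTF
 */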
#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
BPF_F_RDONLY_PROG | \
......
@@ -59,6 +59,14 @@ struct bpf_active_lock {
u32 id;
};
#define ITER_PREFIX "bpf_iter_"
enum bpf_iter_state {
BPF_ITER_STATE_INVALID, /* for non-first slot */
BPF_ITER_STATE_ACTIVE,
BPF_ITER_STATE_DRAINED,
};
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
@@ -103,6 +111,18 @@ struct bpf_reg_state {
bool first_slot;
} dynptr;
/* For bpf_iter stack slots */
struct {
/* BTF container and BTF type ID describing
* struct bpf_iter_<type> of an iterator state
*/
struct btf *btf;
u32 btf_id;
/* packing the following two fields to fit iter state into 16 bytes */
enum bpf_iter_state state:2;
int depth:30;
} iter;
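/* editorial note: on 64-bit kernels this arm is 8 bytes (btf pointer) +
 * 4 bytes (btf_id) + 4 bytes (state:2 + depth:30 bitfields) = 16 bytes,
 * which is what the "packing" comment above refers to
 */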
/* Max size from any of the above. */
struct {
unsigned long raw1;
@@ -141,6 +161,8 @@ struct bpf_reg_state {
* same reference to the socket, to determine proper reference freeing.
* For stack slots that are dynptrs, this is used to track references to
* the dynptr to determine proper reference freeing.
* Similarly to dynptrs, we use ID to track "belonging" of a reference
* to a specific instance of bpf_iter.
*/
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
@@ -211,9 +233,11 @@ enum bpf_stack_slot_type {
* is stored in bpf_stack_state->spilled_ptr.dynptr.type
*/
STACK_DYNPTR,
STACK_ITER,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
@@ -448,6 +472,7 @@ struct bpf_insn_aux_data {
bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool zext_dst; /* this insn zero extends dst reg */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
u8 alu_state; /* used in combination with alu_limit */
/* below fields are initialized once */
......
@@ -71,6 +71,10 @@
#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
/* only one of KF_ITER_{NEW,NEXT,DESTROY} could be specified per kfunc */
#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
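/* e.g., the numbers iterator below registers its next method as both an
 * iterator next method and a possibly-NULL-returning kfunc:
 *   BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
 */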
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
......
@@ -7112,4 +7112,12 @@ enum {
BPF_F_TIMER_ABS = (1ULL << 0),
};
/* BPF numbers iterator state */
struct bpf_iter_num {
/* opaque iterator state; having __u64 here allows preserving correct
* alignment requirements in vmlinux.h, generated from BTF
*/
__u64 __opaque[1];
} __attribute__((aligned(8)));
#endif /* _UAPI__LINUX_BPF_H__ */
@@ -776,3 +776,73 @@ const struct bpf_func_proto bpf_loop_proto = {
.arg3_type = ARG_PTR_TO_STACK_OR_NULL,
.arg4_type = ARG_ANYTHING,
};
struct bpf_iter_num_kern {
int cur; /* current value, inclusive */
int end; /* final value, exclusive */
} __aligned(8);
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
{
struct bpf_iter_num_kern *s = (void *)it;
BUILD_BUG_ON(sizeof(struct bpf_iter_num_kern) != sizeof(struct bpf_iter_num));
BUILD_BUG_ON(__alignof__(struct bpf_iter_num_kern) != __alignof__(struct bpf_iter_num));
BTF_TYPE_EMIT(struct bpf_iter_num);
/* start == end is legit, it's an empty range and we'll just get NULL
* on first (and any subsequent) bpf_iter_num_next() call
*/
if (start > end) {
s->cur = s->end = 0;
return -EINVAL;
}
/* avoid overflows, e.g., if start == INT_MIN and end == INT_MAX */
if ((s64)end - (s64)start > BPF_MAX_LOOPS) {
s->cur = s->end = 0;
return -E2BIG;
}
/* user will call bpf_iter_num_next() first,
* which will set s->cur to exactly the start value;
* underflow shouldn't matter
*/
s->cur = start - 1;
s->end = end;
return 0;
}
__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num *it)
{
struct bpf_iter_num_kern *s = (void *)it;
/* check failed initialization or if we are done (same behavior);
* need to be careful about overflow, so convert to s64 for checks,
* e.g., if s->cur == s->end == INT_MAX, we can't just do
* s->cur + 1 >= s->end
*/
if ((s64)(s->cur + 1) >= s->end) {
s->cur = s->end = 0;
return NULL;
}
s->cur++;
return &s->cur;
}
__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it)
{
struct bpf_iter_num_kern *s = (void *)it;
s->cur = s->end = 0;
}
__diag_pop();
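From the BPF program side, these kfuncs form the usual new/next/destroy
triple; a minimal usage sketch (mirroring the num_succ_elem_cnt selftest
further down):

  struct bpf_iter_num it;
  int *v, sum = 0;

  bpf_iter_num_new(&it, 0, 5);
  while ((v = bpf_iter_num_next(&it)))
      sum += *v; /* 0 + 1 + 2 + 3 + 4 == 10 */
  bpf_iter_num_destroy(&it);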
@@ -7596,6 +7596,108 @@ BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
const struct btf_type *func, u32 func_flags)
{
u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
const char *name, *sfx, *iter_name;
const struct btf_param *arg;
const struct btf_type *t;
char exp_name[128];
u32 nr_args;
/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
if (!flags || (flags & (flags - 1)))
return -EINVAL;
/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
nr_args = btf_type_vlen(func);
if (nr_args < 1)
return -EINVAL;
arg = &btf_params(func)[0];
t = btf_type_skip_modifiers(btf, arg->type, NULL);
if (!t || !btf_type_is_ptr(t))
return -EINVAL;
t = btf_type_skip_modifiers(btf, t->type, NULL);
if (!t || !__btf_type_is_struct(t))
return -EINVAL;
name = btf_name_by_offset(btf, t->name_off);
if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
return -EINVAL;
/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
* fit nicely in stack slots
*/
if (t->size == 0 || (t->size % 8))
return -EINVAL;
/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
* naming pattern
*/
iter_name = name + sizeof(ITER_PREFIX) - 1;
if (flags & KF_ITER_NEW)
sfx = "new";
else if (flags & KF_ITER_NEXT)
sfx = "next";
else /* (flags & KF_ITER_DESTROY) */
sfx = "destroy";
snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
if (strcmp(func_name, exp_name))
return -EINVAL;
/* only iter constructor should have extra arguments */
if (!(flags & KF_ITER_NEW) && nr_args != 1)
return -EINVAL;
if (flags & KF_ITER_NEXT) {
/* bpf_iter_<type>_next() should return pointer */
t = btf_type_skip_modifiers(btf, func->type, NULL);
if (!t || !btf_type_is_ptr(t))
return -EINVAL;
}
if (flags & KF_ITER_DESTROY) {
/* bpf_iter_<type>_destroy() should return void */
t = btf_type_by_id(btf, func->type);
if (!t || !btf_type_is_void(t))
return -EINVAL;
}
return 0;
}
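/* Editorial sketch: a custom iterator passes the checks above if it has the
 * following shape (bpf_iter_foo is hypothetical, for illustration only):
 *
 *   struct bpf_iter_foo { __u64 __opaque[2]; } __attribute__((aligned(8)));
 *
 *   __bpf_kfunc int bpf_iter_foo_new(struct bpf_iter_foo *it, int cfg);
 *   __bpf_kfunc int *bpf_iter_foo_next(struct bpf_iter_foo *it);
 *   __bpf_kfunc void bpf_iter_foo_destroy(struct bpf_iter_foo *it);
 *
 * The constructor may take extra arguments, next must return a pointer,
 * destroy must return void, the state struct's name must start with
 * "bpf_iter_", and its size must be a non-zero multiple of 8.
 */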
static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
{
const struct btf_type *func;
const char *func_name;
int err;
/* any kfunc should be FUNC -> FUNC_PROTO */
func = btf_type_by_id(btf, func_id);
if (!func || !btf_type_is_func(func))
return -EINVAL;
/* sanity check kfunc name */
func_name = btf_name_by_offset(btf, func->name_off);
if (!func_name || !func_name[0])
return -EINVAL;
func = btf_type_by_id(btf, func->type);
if (!func || !btf_type_is_func_proto(func))
return -EINVAL;
if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
if (err)
return err;
}
return 0;
}
/* Kernel Function (kfunc) BTF ID set registration API */
static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
@@ -7772,7 +7874,7 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
const struct btf_kfunc_id_set *kset)
{
struct btf *btf;
int ret;
int ret, i;
btf = btf_get_module_btf(kset->owner);
if (!btf) {
@@ -7789,7 +7891,15 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
if (IS_ERR(btf))
return PTR_ERR(btf);
for (i = 0; i < kset->set->cnt; i++) {
ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id,
kset->set->pairs[i].flags);
if (ret)
goto err_out;
}
ret = btf_populate_kfunc_set(btf, hook, kset->set);
err_out:
btf_put(btf);
return ret;
}
......
@@ -2411,6 +2411,9 @@ BTF_ID_FLAGS(func, bpf_rcu_read_lock)
BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
BTF_SET8_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
......
This diff is collapsed.
@@ -7112,4 +7112,12 @@ enum {
BPF_F_TIMER_ABS = (1ULL << 0),
};
/* BPF numbers iterator state */
struct bpf_iter_num {
/* opaque iterator state; having __u64 here allows preserving correct
* alignment requirements in vmlinux.h, generated from BTF
*/
__u64 __opaque[1];
} __attribute__((aligned(8)));
#endif /* _UAPI__LINUX_BPF_H__ */
@@ -8,6 +8,7 @@ dynptr/test_dynptr_skb_data
dynptr/test_skb_readonly
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test # relies on fentry
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
......
@@ -65,6 +65,34 @@ bpf_testmod_test_mod_kfunc(int i)
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
if (cnt < 0) {
it->cnt = 0;
return -EINVAL;
}
it->value = value;
it->cnt = cnt;
return 0;
}
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
if (it->cnt <= 0)
return NULL;
it->cnt--;
return &it->value;
}
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
it->cnt = 0;
}
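/* e.g., bpf_iter_testmod_seq_new(&it, 42, 3) makes subsequent
 * bpf_iter_testmod_seq_next(&it) calls return a pointer to 42 three times
 * in a row, followed by NULL
 */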
struct bpf_testmod_btf_type_tag_1 {
int a;
};
@@ -220,6 +248,17 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write,
};
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_common_kfunc_ids,
};
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)
@@ -235,7 +274,8 @@ static int bpf_testmod_init(void)
{
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
......
@@ -22,4 +22,10 @@ struct bpf_testmod_test_writable_ctx {
int val;
};
/* BPF iter that returns *value* *n* times in a row */
struct bpf_iter_testmod_seq {
s64 value;
int cnt;
};
#endif /* _BPF_TESTMOD_H */
@@ -144,6 +144,12 @@ void test_verif_scale_pyperf600_nounroll()
scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf600_iter()
{
/* open-coded BPF iterator version */
scale_test("pyperf600_iter.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_loop1()
{
scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "iters.skel.h"
#include "iters_state_safety.skel.h"
#include "iters_looping.skel.h"
#include "iters_num.skel.h"
#include "iters_testmod_seq.skel.h"
static void subtest_num_iters(void)
{
struct iters_num *skel;
int err;
skel = iters_num__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = iters_num__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
usleep(1);
iters_num__detach(skel);
#define VALIDATE_CASE(case_name) \
ASSERT_EQ(skel->bss->res_##case_name, \
skel->rodata->exp_##case_name, \
#case_name)
VALIDATE_CASE(empty_zero);
VALIDATE_CASE(empty_int_min);
VALIDATE_CASE(empty_int_max);
VALIDATE_CASE(empty_minus_one);
VALIDATE_CASE(simple_sum);
VALIDATE_CASE(neg_sum);
VALIDATE_CASE(very_neg_sum);
VALIDATE_CASE(neg_pos_sum);
VALIDATE_CASE(invalid_range);
VALIDATE_CASE(max_range);
VALIDATE_CASE(e2big_range);
VALIDATE_CASE(succ_elem_cnt);
VALIDATE_CASE(overfetched_elem_cnt);
VALIDATE_CASE(fail_elem_cnt);
#undef VALIDATE_CASE
cleanup:
iters_num__destroy(skel);
}
static void subtest_testmod_seq_iters(void)
{
struct iters_testmod_seq *skel;
int err;
if (!env.has_testmod) {
test__skip();
return;
}
skel = iters_testmod_seq__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = iters_testmod_seq__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
usleep(1);
iters_testmod_seq__detach(skel);
#define VALIDATE_CASE(case_name) \
ASSERT_EQ(skel->bss->res_##case_name, \
skel->rodata->exp_##case_name, \
#case_name)
VALIDATE_CASE(empty);
VALIDATE_CASE(full);
VALIDATE_CASE(truncated);
#undef VALIDATE_CASE
cleanup:
iters_testmod_seq__destroy(skel);
}
void test_iters(void)
{
RUN_TESTS(iters_state_safety);
RUN_TESTS(iters_looping);
RUN_TESTS(iters);
if (env.has_testmod)
RUN_TESTS(iters_testmod_seq);
if (test__start_subtest("num"))
subtest_num_iters();
if (test__start_subtest("testmod_seq"))
subtest_testmod_seq_iters();
}
@@ -3,7 +3,6 @@
#include <test_progs.h>
#include "test_uprobe_autoattach.skel.h"
#include "progs/bpf_misc.h"
/* uprobe attach point */
static noinline int autoattach_trigger_func(int arg1, int arg2, int arg3,
......
@@ -36,6 +36,7 @@
#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
#define __imm(name) [name]"i"(name)
#define __imm_addr(name) [name]"i"(&name)
#define __imm_ptr(name) [name]"p"(&name)
#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
@@ -75,5 +76,104 @@
#define FUNC_REG_ARG_CNT 5
#endif
struct bpf_iter_num;
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;
#ifndef bpf_for_each
/* bpf_for_each(iter_type, cur_elem, args...) provides a generic construct
* for using BPF open-coded iterators without having to write mundane,
* explicit, low-level loop logic. It provides a for()-like construct that
* can be used pretty naturally. E.g., for some hypothetical cgroup iterator,
* you'd write:
*
* struct cgroup *cg, *parent_cg = <...>;
*
* bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
* bpf_printk("Child cgroup id = %d", cg->cgroup_id);
* if (cg->cgroup_id == 123)
* break;
* }
*
* I.e., it looks almost like a high-level foreach loop in other languages,
* supports continue/break, and is verifiable by the BPF verifier.
*
* For iterating integers, the difference between bpf_for_each(num, i, N, M)
* and bpf_for(i, N, M) is that bpf_for() provides an additional proof to the
* verifier that i is in the [N, M) range, and in the bpf_for_each() case i is
* an `int *`, not just an `int`. So for integers, bpf_for() is more convenient.
*
* Note: this macro relies on the C99 feature of declaring variables inside
* a for() loop, bound to the for() loop's lifetime. It also utilizes the GCC
* extension __attribute__((cleanup(<func>))), supported by both GCC and
* Clang.
*/
#define bpf_for_each(type, cur, args...) for ( \
/* initialize and define destructor */ \
struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_##type##_destroy))), \
/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
*___p = (bpf_iter_##type##_new(&___it, ##args), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_##type##_destroy, (void *)0); \
/* iteration and termination check */ \
(((cur) = bpf_iter_##type##_next(&___it))); \
)
#endif /* bpf_for_each */
#ifndef bpf_for
/* bpf_for(i, start, end) implements a for()-like looping construct that sets
* the provided integer variable *i* to values starting from *start* through,
* but not including, *end*. It also proves to the BPF verifier that *i*
* belongs to the range [start, end), so this can be used for accessing
* arrays without extra checks.
*
* Note: *start* and *end* are assumed to be expressions with no side effects
* and whose values do not change throughout bpf_for() loop execution. They do
* not have to be statically known or constant, though.
*
* Note: similarly to bpf_for_each(), it relies on the C99 feature of declaring
* for()-loop-bound variables and on the cleanup attribute, supported by GCC
* and Clang.
*/
#define bpf_for(i, start, end) for ( \
/* initialize and define destructor */ \
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_num_destroy))), \
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
*___p = (bpf_iter_num_new(&___it, (start), (end)), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_num_destroy, (void *)0); \
({ \
/* iteration step */ \
int *___t = bpf_iter_num_next(&___it); \
/* termination and bounds check */ \
(___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \
}); \
)
#endif /* bpf_for */
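/* Editorial sketch: because bpf_for() proves that i stays within [start, end),
 * it can guard array accesses without explicit bounds checks (arr is
 * hypothetical, for illustration only):
 *
 *   int arr[16] = {}, i;
 *   long sum = 0;
 *
 *   bpf_for(i, 0, 16)
 *       sum += arr[i];  // verifier knows 0 <= i < 16 here
 */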
#ifndef bpf_repeat
/* bpf_repeat(N) performs N iterations without exposing the iteration number
*
* Note: similarly to bpf_for_each(), it relies on the C99 feature of declaring
* for()-loop-bound variables and on the cleanup attribute, supported by GCC
* and Clang.
*/
#define bpf_repeat(N) for ( \
/* initialize and define destructor */ \
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_num_destroy))), \
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
*___p = (bpf_iter_num_new(&___it, 0, (N)), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_num_destroy, (void *)0); \
bpf_iter_num_next(&___it); \
/* nothing here */ \
)
#endif /* bpf_repeat */
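/* Editorial sketch: bpf_repeat() suits bounded retry loops where the
 * iteration number itself is not needed (try_stuff() is hypothetical):
 *
 *   bpf_repeat(8) {
 *       if (try_stuff() == 0)
 *           break;
 *   }
 */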
#endif
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
#define ITER_HELPERS \
__imm(bpf_iter_num_new), \
__imm(bpf_iter_num_next), \
__imm(bpf_iter_num_destroy)
SEC("?raw_tp")
__success
int force_clang_to_emit_btf_for_externs(void *ctx)
{
/* we need this as a workaround to force the compiler to emit BTF
* information for the bpf_iter_num_{new,next,destroy}() kfuncs, as,
* apparently, it doesn't emit it for symbols that are only referenced
* from assembly (or from the cleanup attribute, for that matter)
*/
bpf_repeat(0);
return 0;
}
SEC("?raw_tp")
__success
int consume_first_item_only(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* consume first item */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto +1;"
"r0 = *(u32 *)(r0 + 0);"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("R0 invalid mem access 'scalar'")
int missing_null_check_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* consume first element */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
/* FAIL: deref with no NULL check */
"r1 = *(u32 *)(r0 + 0);"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure
__msg("invalid access to memory, mem_size=4 off=0 size=8")
__msg("R0 min value is outside of the allowed memory range")
int wrong_sized_read_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* consume first element */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto +1;"
/* FAIL: deref more than available 4 bytes */
"r0 = *(u64 *)(r0 + 0);"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
int simplest_loop(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
"r6 = 0;" /* init sum */
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 10;"
"call %[bpf_iter_num_new];"
"1:"
/* consume next item */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto 2f;"
"r0 = *(u32 *)(r0 + 0);"
"r6 += r0;" /* accumulate sum */
"goto 1b;"
"2:"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common, "r6"
);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <limits.h>
#include <linux/errno.h>
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
const volatile __s64 exp_empty_zero = 0 + 1;
__s64 res_empty_zero;
SEC("raw_tp/sys_enter")
int num_empty_zero(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, 0, 0) sum += i;
res_empty_zero = 1 + sum;
return 0;
}
const volatile __s64 exp_empty_int_min = 0 + 2;
__s64 res_empty_int_min;
SEC("raw_tp/sys_enter")
int num_empty_int_min(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MIN, INT_MIN) sum += i;
res_empty_int_min = 2 + sum;
return 0;
}
const volatile __s64 exp_empty_int_max = 0 + 3;
__s64 res_empty_int_max;
SEC("raw_tp/sys_enter")
int num_empty_int_max(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MAX, INT_MAX) sum += i;
res_empty_int_max = 3 + sum;
return 0;
}
const volatile __s64 exp_empty_minus_one = 0 + 4;
__s64 res_empty_minus_one;
SEC("raw_tp/sys_enter")
int num_empty_minus_one(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, -1, -1) sum += i;
res_empty_minus_one = 4 + sum;
return 0;
}
const volatile __s64 exp_simple_sum = 9 * 10 / 2;
__s64 res_simple_sum;
SEC("raw_tp/sys_enter")
int num_simple_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, 0, 10) sum += i;
res_simple_sum = sum;
return 0;
}
const volatile __s64 exp_neg_sum = -11 * 10 / 2;
__s64 res_neg_sum;
SEC("raw_tp/sys_enter")
int num_neg_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, -10, 0) sum += i;
res_neg_sum = sum;
return 0;
}
const volatile __s64 exp_very_neg_sum = INT_MIN + (__s64)(INT_MIN + 1);
__s64 res_very_neg_sum;
SEC("raw_tp/sys_enter")
int num_very_neg_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MIN, INT_MIN + 2) sum += i;
res_very_neg_sum = sum;
return 0;
}
const volatile __s64 exp_very_big_sum = (__s64)(INT_MAX - 1) + (__s64)(INT_MAX - 2);
__s64 res_very_big_sum;
SEC("raw_tp/sys_enter")
int num_very_big_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MAX - 2, INT_MAX) sum += i;
res_very_big_sum = sum;
return 0;
}
const volatile __s64 exp_neg_pos_sum = -3;
__s64 res_neg_pos_sum;
SEC("raw_tp/sys_enter")
int num_neg_pos_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, -3, 3) sum += i;
res_neg_pos_sum = sum;
return 0;
}
const volatile __s64 exp_invalid_range = -EINVAL;
__s64 res_invalid_range;
SEC("raw_tp/sys_enter")
int num_invalid_range(const void *ctx)
{
struct bpf_iter_num it;
res_invalid_range = bpf_iter_num_new(&it, 1, 0);
bpf_iter_num_destroy(&it);
return 0;
}
const volatile __s64 exp_max_range = 0 + 10;
__s64 res_max_range;
SEC("raw_tp/sys_enter")
int num_max_range(const void *ctx)
{
struct bpf_iter_num it;
res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS);
bpf_iter_num_destroy(&it);
return 0;
}
const volatile __s64 exp_e2big_range = -E2BIG;
__s64 res_e2big_range;
SEC("raw_tp/sys_enter")
int num_e2big_range(const void *ctx)
{
struct bpf_iter_num it;
res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS);
bpf_iter_num_destroy(&it);
return 0;
}
const volatile __s64 exp_succ_elem_cnt = 10;
__s64 res_succ_elem_cnt;
SEC("raw_tp/sys_enter")
int num_succ_elem_cnt(const void *ctx)
{
struct bpf_iter_num it;
int cnt = 0, *v;
bpf_iter_num_new(&it, 0, 10);
while ((v = bpf_iter_num_next(&it))) {
cnt++;
}
bpf_iter_num_destroy(&it);
res_succ_elem_cnt = cnt;
return 0;
}
const volatile __s64 exp_overfetched_elem_cnt = 5;
__s64 res_overfetched_elem_cnt;
SEC("raw_tp/sys_enter")
int num_overfetched_elem_cnt(const void *ctx)
{
struct bpf_iter_num it;
int cnt = 0, *v, i;
bpf_iter_num_new(&it, 0, 5);
for (i = 0; i < 10; i++) {
v = bpf_iter_num_next(&it);
if (v)
cnt++;
}
bpf_iter_num_destroy(&it);
res_overfetched_elem_cnt = cnt;
return 0;
}
const volatile __s64 exp_fail_elem_cnt = 20 + 0;
__s64 res_fail_elem_cnt;
SEC("raw_tp/sys_enter")
int num_fail_elem_cnt(const void *ctx)
{
struct bpf_iter_num it;
int cnt = 0, *v, i;
bpf_iter_num_new(&it, 100, 10);
for (i = 0; i < 10; i++) {
v = bpf_iter_num_next(&it);
if (v)
cnt++;
}
bpf_iter_num_destroy(&it);
res_fail_elem_cnt = 20 + cnt;
return 0;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
#define ITER_HELPERS \
__imm(bpf_iter_num_new), \
__imm(bpf_iter_num_next), \
__imm(bpf_iter_num_destroy)
SEC("?raw_tp")
__success
int force_clang_to_emit_btf_for_externs(void *ctx)
{
/* we need this as a workaround to force the compiler to emit BTF
* information for the bpf_iter_num_{new,next,destroy}() kfuncs, as,
* apparently, it doesn't emit it for symbols that are only referenced
* from assembly (or from the cleanup attribute, for that matter)
*/
bpf_repeat(0);
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
int create_and_destroy(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("Unreleased reference id=1")
int create_and_forget_to_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int destroy_without_creating_fail(void *ctx)
{
/* iterator state is deliberately never initialized here */
struct bpf_iter_num iter;
asm volatile (
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int compromise_iter_w_direct_write_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* directly write over first half of iter state */
"*(u64 *)(%[iter] + 0) = r0;"
/* (attempt to) destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("Unreleased reference id=1")
int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* directly write over first half of iter state */
"*(u64 *)(%[iter] + 0) = r0;"
/* don't destroy iter, leaking ref, which should fail */
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int compromise_iter_w_helper_write_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* overwrite 8th byte with bpf_probe_read_kernel() */
"r1 = %[iter];"
"r1 += 7;"
"r2 = 1;"
"r3 = 0;" /* NULL */
"call %[bpf_probe_read_kernel];"
/* (attempt to) destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel)
: __clobber_common
);
return 0;
}
static __noinline void subprog_with_iter(void)
{
struct bpf_iter_num iter;
bpf_iter_num_new(&iter, 0, 1);
return;
}
SEC("?raw_tp")
__failure
/* ensure there was a call to subprog, which might happen without __noinline */
__msg("returning from callee:")
__msg("Unreleased reference id=1")
int leak_iter_from_subprog_fail(void *ctx)
{
subprog_with_iter();
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
int valid_stack_reuse(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
/* now reuse same stack slots */
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected uninitialized iter_num as arg #1")
int double_create_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* (attempt to) create iterator again */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int double_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
/* (attempt to) destroy iterator again */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int next_without_new_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* don't create an iterator, but try to iterate anyway */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int next_after_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
/* iterator is already destroyed, but try to iterate anyway */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int __naked read_from_iter_slot_fail(void)
{
asm volatile (
/* r6 points to struct bpf_iter_num on the stack */
"r6 = r10;"
"r6 += -24;"
/* create iterator */
"r1 = r6;"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* attempt to leak bpf_iter_num state */
"r7 = *(u64 *)(r6 + 0);"
"r8 = *(u64 *)(r6 + 8);"
/* destroy iterator */
"r1 = r6;"
"call %[bpf_iter_num_destroy];"
/* leak bpf_iter_num state */
"r0 = r7;"
"if r7 > r8 goto +1;"
"r0 = r8;"
"exit;"
:
: ITER_HELPERS
: __clobber_common, "r6", "r7", "r8"
);
}
int zero;
SEC("?raw_tp")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
__msg("Unreleased reference")
int stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* Create a fork in logic, with general setup as follows:
* - fallthrough (first) path is valid;
* - branch (second) path is invalid.
* Then depending on what we do in fallthrough vs branch path,
* we try to detect bugs in func_states_equal(), regsafe(),
* refsafe(), stack_safe(), and similar by tricking verifier
* into believing that branch state is a valid subset of
* a fallthrough state. Verifier should reject overall
* validation, unless there is a bug somewhere in verifier
* logic.
*/
"call %[bpf_get_prandom_u32];"
"r6 = r0;"
"call %[bpf_get_prandom_u32];"
"r7 = r0;"
"if r6 > r7 goto bad;" /* fork */
/* spill r6 into stack slot of bpf_iter_num var */
"*(u64 *)(%[iter] + 0) = r6;"
"goto skip_bad;"
"bad:"
/* create iterator in the same stack slot */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* but then forget about it and overwrite it back to r6 spill */
"*(u64 *)(%[iter] + 0) = r6;"
"skip_bad:"
"goto +0;" /* force checkpoint */
/* corrupt stack slots, if they are really iter state */
"*(u64 *)(%[iter] + 0) = r6;"
:
: __imm_ptr(iter),
__imm_addr(zero),
__imm(bpf_get_prandom_u32),
__imm(bpf_dynptr_from_mem),
ITER_HELPERS
: __clobber_common, "r6", "r7"
);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct bpf_iter_testmod_seq {
u64 :64;
u64 :64;
};
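/* the two unnamed u64 bitfields above mirror the 16-byte kernel-side state
 * (s64 value + int cnt + padding) without exposing any of its fields to the
 * BPF program
 */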
extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym;
extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym;
extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym;
const volatile __s64 exp_empty = 0 + 1;
__s64 res_empty;
SEC("raw_tp/sys_enter")
__success __log_level(2)
__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_empty(const void *ctx)
{
__s64 sum = 0, *i;
bpf_for_each(testmod_seq, i, 1000, 0) sum += *i;
res_empty = 1 + sum;
return 0;
}
const volatile __s64 exp_full = 1000000;
__s64 res_full;
SEC("raw_tp/sys_enter")
__success __log_level(2)
__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_full(const void *ctx)
{
__s64 sum = 0, *i;
bpf_for_each(testmod_seq, i, 1000, 1000) sum += *i;
res_full = sum;
return 0;
}
const volatile __s64 exp_truncated = 10 * 1000000;
__s64 res_truncated;
static volatile int zero = 0;
SEC("raw_tp/sys_enter")
__success __log_level(2)
__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_truncated(const void *ctx)
{
__s64 sum = 0, *i;
int cnt = zero;
bpf_for_each(testmod_seq, i, 10, 2000000) {
sum += *i;
cnt++;
if (cnt >= 1000000)
break;
}
res_truncated = sum;
return 0;
}
char _license[] SEC("license") = "GPL";
@@ -4,12 +4,12 @@
* Copyright 2020 Google LLC.
*/
#include "bpf_misc.h"
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
......
@@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define FUNCTION_NAME_LEN 64
#define FILE_NAME_LEN 128
@@ -294,17 +295,22 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
if (ctx.done)
return 0;
#else
#ifdef NO_UNROLL
#if defined(USE_ITER)
/* no for loop, no unrolling */
#elif defined(NO_UNROLL)
#pragma clang loop unroll(disable)
#else
#ifdef UNROLL_COUNT
#elif defined(UNROLL_COUNT)
#pragma clang loop unroll_count(UNROLL_COUNT)
#else
#pragma clang loop unroll(full)
#endif
#endif /* NO_UNROLL */
/* Unwind python stack */
#ifdef USE_ITER
int i;
bpf_for(i, 0, STACK_MAX_LEN) {
#else /* !USE_ITER */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
#endif
if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
#define STACK_MAX_LEN 600
#define SUBPROGS
#define NO_UNROLL
#define USE_ITER
#include "pyperf.h"
@@ -2,7 +2,4 @@
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
#define NO_UNROLL
/* clang will not unroll at all.
* Total program size is around 2k insns
*/
#include "pyperf.h"