Commit d2a93715 authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

selftests/bpf: Add tests for BPF exceptions

Add selftests to cover success and failure cases of API usage, runtime
behavior and invariants that need to be maintained for implementation
correctness.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20230912233214.1518551-18-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d6ea0680
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
@@ -6,6 +6,7 @@ bpf_loop # attaches to __x64_sys_nanosleep
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
dynptr/test_dynptr_skb_data
dynptr/test_skb_readonly
exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
This diff is collapsed.
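The collapsed diff is the userspace side of the tests. As a rough illustration of how one of the tc programs below could be driven, here is a minimal standalone sketch (not the collapsed file): the skeleton names follow bpftool's convention for progs/exceptions.c, and the expected return value of 64 assumes the bpf_throw() cookie is propagated back through the default exception callback.
/* Minimal sketch, not the collapsed prog_tests file. */
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "exceptions.skel.h"
int main(void)
{
	char buf[64] = {0};	/* dummy frame; tc test runs want at least an Ethernet header */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buf,
		.data_size_in = sizeof(buf),
		.repeat = 1,
	);
	struct exceptions *skel = exceptions__open_and_load();
	int err;
	if (!skel)
		return 1;
	/* exception_throw_always_1 does bpf_throw(64); expect retval == 64 */
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.exception_throw_always_1),
				     &topts);
	printf("err=%d retval=%u (expected 64)\n", err, topts.retval);
	exceptions__destroy(skel);
	return 0;
}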
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
#ifndef ETH_P_IP
#define ETH_P_IP 0x0800
#endif
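/* bpf_throw() does not return: it unwinds the BPF stack, including the frames
 * of any in-progress subprogs, and passes its cookie to the exception
 * callback. With no callback registered (as in this file, except for
 * exception_ext_mod_cb_runtime), the cookie is expected to become the
 * program's return value via the default exception callback.
 */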
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 4);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
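/* Prog array used by the tail-call tests below; slot 0 is presumably filled
 * with exception_tail_call_target by the (collapsed) userspace test so the
 * throw in the tail-call target can be observed.
 */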
static __noinline int static_func(u64 i)
{
bpf_throw(32);
return i;
}
__noinline int global2static_simple(u64 i)
{
static_func(i + 2);
return i - 1;
}
__noinline int global2static(u64 i)
{
if (i == ETH_P_IP)
bpf_throw(16);
return static_func(i);
}
static __noinline int static2global(u64 i)
{
return global2static(i) + i;
}
SEC("tc")
int exception_throw_always_1(struct __sk_buff *ctx)
{
bpf_throw(64);
return 0;
}
/* In this case, the global func will never be seen executing after call to
* static subprog, hence verifier will DCE the remaining instructions. Ensure we
* are resilient to that.
*/
SEC("tc")
int exception_throw_always_2(struct __sk_buff *ctx)
{
return global2static_simple(ctx->protocol);
}
SEC("tc")
int exception_throw_unwind_1(struct __sk_buff *ctx)
{
return static2global(bpf_ntohs(ctx->protocol));
}
SEC("tc")
int exception_throw_unwind_2(struct __sk_buff *ctx)
{
return static2global(bpf_ntohs(ctx->protocol) - 1);
}
SEC("tc")
int exception_throw_default(struct __sk_buff *ctx)
{
bpf_throw(0);
return 1;
}
SEC("tc")
int exception_throw_default_value(struct __sk_buff *ctx)
{
bpf_throw(5);
return 1;
}
SEC("tc")
int exception_tail_call_target(struct __sk_buff *ctx)
{
bpf_throw(16);
return 0;
}
static __noinline
int exception_tail_call_subprog(struct __sk_buff *ctx)
{
volatile int ret = 10;
bpf_tail_call_static(ctx, &jmp_table, 0);
return ret;
}
SEC("tc")
int exception_tail_call(struct __sk_buff *ctx)
{
volatile int ret = 0;
ret = exception_tail_call_subprog(ctx);
return ret + 8;
}
__noinline int exception_ext_global(struct __sk_buff *ctx)
{
volatile int ret = 0;
return ret;
}
static __noinline int exception_ext_static(struct __sk_buff *ctx)
{
return exception_ext_global(ctx);
}
SEC("tc")
int exception_ext(struct __sk_buff *ctx)
{
return exception_ext_static(ctx);
}
__noinline int exception_cb_mod_global(u64 cookie)
{
volatile int ret = 0;
return ret;
}
/* Example of how the exception callback supplied during verification can still
* introduce extensions by calling to dummy global functions, and alter runtime
* behavior.
*
* Right now we don't allow freplace attachment to exception callback itself,
* but if the need arises this restriction is technically feasible to relax in
* the future.
*/
__noinline int exception_cb_mod(u64 cookie)
{
return exception_cb_mod_global(cookie) + cookie + 10;
}
SEC("tc")
__exception_cb(exception_cb_mod)
int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
{
bpf_throw(25);
return 0;
}
__noinline static int subprog(struct __sk_buff *ctx)
{
return bpf_ktime_get_ns();
}
__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
if (ctx->tstamp)
bpf_throw(0);
return bpf_ktime_get_ns();
}
__noinline int global_subprog(struct __sk_buff *ctx)
{
return bpf_ktime_get_ns();
}
__noinline int throwing_global_subprog(struct __sk_buff *ctx)
{
if (ctx->tstamp)
bpf_throw(0);
return bpf_ktime_get_ns();
}
SEC("tc")
int exception_throw_subprog(struct __sk_buff *ctx)
{
switch (ctx->protocol) {
case 1:
return subprog(ctx);
case 2:
return global_subprog(ctx);
case 3:
return throwing_subprog(ctx);
case 4:
return throwing_global_subprog(ctx);
default:
break;
}
bpf_throw(1);
return 0;
}
__noinline int assert_nz_gfunc(u64 c)
{
volatile u64 cookie = c;
bpf_assert(cookie != 0);
return 0;
}
__noinline int assert_zero_gfunc(u64 c)
{
volatile u64 cookie = c;
bpf_assert_eq(cookie, 0);
return 0;
}
__noinline int assert_neg_gfunc(s64 c)
{
volatile s64 cookie = c;
bpf_assert_lt(cookie, 0);
return 0;
}
__noinline int assert_pos_gfunc(s64 c)
{
volatile s64 cookie = c;
bpf_assert_gt(cookie, 0);
return 0;
}
__noinline int assert_negeq_gfunc(s64 c)
{
volatile s64 cookie = c;
bpf_assert_le(cookie, -1);
return 0;
}
__noinline int assert_poseq_gfunc(s64 c)
{
volatile s64 cookie = c;
bpf_assert_ge(cookie, 1);
return 0;
}
__noinline int assert_nz_gfunc_with(u64 c)
{
volatile u64 cookie = c;
bpf_assert_with(cookie != 0, cookie + 100);
return 0;
}
__noinline int assert_zero_gfunc_with(u64 c)
{
volatile u64 cookie = c;
bpf_assert_eq_with(cookie, 0, cookie + 100);
return 0;
}
__noinline int assert_neg_gfunc_with(s64 c)
{
volatile s64 cookie = c;
bpf_assert_lt_with(cookie, 0, cookie + 100);
return 0;
}
__noinline int assert_pos_gfunc_with(s64 c)
{
volatile s64 cookie = c;
bpf_assert_gt_with(cookie, 0, cookie + 100);
return 0;
}
__noinline int assert_negeq_gfunc_with(s64 c)
{
volatile s64 cookie = c;
bpf_assert_le_with(cookie, -1, cookie + 100);
return 0;
}
__noinline int assert_poseq_gfunc_with(s64 c)
{
volatile s64 cookie = c;
bpf_assert_ge_with(cookie, 1, cookie + 100);
return 0;
}
#define check_assert(name, cookie, tag) \
SEC("tc") \
int exception##tag##name(struct __sk_buff *ctx) \
{ \
return name(cookie) + 1; \
}
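/* One tc program per assertion helper: the "_" variants are invoked with
 * values that satisfy the assertion, the "_bad_" variants with values that
 * violate it and therefore throw.
 */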
check_assert(assert_nz_gfunc, 5, _);
check_assert(assert_zero_gfunc, 0, _);
check_assert(assert_neg_gfunc, -100, _);
check_assert(assert_pos_gfunc, 100, _);
check_assert(assert_negeq_gfunc, -1, _);
check_assert(assert_poseq_gfunc, 1, _);
check_assert(assert_nz_gfunc_with, 5, _);
check_assert(assert_zero_gfunc_with, 0, _);
check_assert(assert_neg_gfunc_with, -100, _);
check_assert(assert_pos_gfunc_with, 100, _);
check_assert(assert_negeq_gfunc_with, -1, _);
check_assert(assert_poseq_gfunc_with, 1, _);
check_assert(assert_nz_gfunc, 0, _bad_);
check_assert(assert_zero_gfunc, 5, _bad_);
check_assert(assert_neg_gfunc, 100, _bad_);
check_assert(assert_pos_gfunc, -100, _bad_);
check_assert(assert_negeq_gfunc, 1, _bad_);
check_assert(assert_poseq_gfunc, -1, _bad_);
check_assert(assert_nz_gfunc_with, 0, _bad_);
check_assert(assert_zero_gfunc_with, 5, _bad_);
check_assert(assert_neg_gfunc_with, 100, _bad_);
check_assert(assert_pos_gfunc_with, -100, _bad_);
check_assert(assert_negeq_gfunc_with, 1, _bad_);
check_assert(assert_poseq_gfunc_with, -1, _bad_);
SEC("tc")
int exception_assert_range(struct __sk_buff *ctx)
{
u64 time = bpf_ktime_get_ns();
bpf_assert_range(time, 0, ~0ULL);
return 1;
}
SEC("tc")
int exception_assert_range_with(struct __sk_buff *ctx)
{
u64 time = bpf_ktime_get_ns();
bpf_assert_range_with(time, 0, ~0ULL, 10);
return 1;
}
SEC("tc")
int exception_bad_assert_range(struct __sk_buff *ctx)
{
u64 time = bpf_ktime_get_ns();
bpf_assert_range(time, -100, 100);
return 1;
}
SEC("tc")
int exception_bad_assert_range_with(struct __sk_buff *ctx)
{
u64 time = bpf_ktime_get_ns();
bpf_assert_range_with(time, -1000, 1000, 10);
return 1;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <limits.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
#define check_assert(type, op, name, value) \
SEC("?tc") \
__log_level(2) __failure \
int check_assert_##op##_##name(void *ctx) \
{ \
type num = bpf_ktime_get_ns(); \
bpf_assert_##op(num, value); \
return *(u64 *)num; \
}
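/* Each generated program dereferences a scalar, so verification is expected
 * to fail; the __msg() annotations check that the preceding bpf_assert_*()
 * narrowed the register bounds as shown in the verifier log.
 */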
__msg(": R0_w=-2147483648 R10=fp0")
check_assert(s64, eq, int_min, INT_MIN);
__msg(": R0_w=2147483647 R10=fp0")
check_assert(s64, eq, int_max, INT_MAX);
__msg(": R0_w=0 R10=fp0")
check_assert(s64, eq, zero, 0);
__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0")
check_assert(s64, eq, llong_min, LLONG_MIN);
__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0")
check_assert(s64, eq, llong_max, LLONG_MAX);
__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
check_assert(s64, lt, pos, INT_MAX);
__msg(": R0_w=scalar(umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, zero, 0);
__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, neg, INT_MIN);
__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
check_assert(s64, le, pos, INT_MAX);
__msg(": R0_w=scalar(smax=0) R10=fp0")
check_assert(s64, le, zero, 0);
__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, le, neg, INT_MIN);
__msg(": R0_w=scalar(umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, pos, INT_MAX);
__msg(": R0_w=scalar(umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, zero, 0);
__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
check_assert(s64, gt, neg, INT_MIN);
__msg(": R0_w=scalar(umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, ge, pos, INT_MAX);
__msg(": R0_w=scalar(umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
check_assert(s64, ge, zero, 0);
__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
check_assert(s64, ge, neg, INT_MIN);
SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=-2147483646,smax=2147483645) R10=fp0")
int check_assert_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
s64 num;
_Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
if (!sk)
return 0;
num = sk->rx_queue_mapping;
bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2);
return *((u8 *)ctx + num);
}
SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=scalar(umin=4096,umax=8192,var_off=(0x0; 0x3fff))")
int check_assert_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;
bpf_assert_range(num, 4096, 8192);
return *((u8 *)ctx + num);
}
SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
int check_assert_single_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
s64 num;
_Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
if (!sk)
return 0;
num = sk->rx_queue_mapping;
bpf_assert_range(num, 4096, 4096);
return *((u8 *)ctx + num);
}
SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
int check_assert_single_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;
bpf_assert_range(num, 4096, 4096);
return *((u8 *)ctx + num);
}
SEC("?tc")
__log_level(2) __failure
__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0")
int check_assert_generic(struct __sk_buff *ctx)
{
u8 *data_end = (void *)(long)ctx->data_end;
u8 *data = (void *)(long)ctx->data;
bpf_assert(data + 64 <= data_end);
return data[128];
}
SEC("?fentry/bpf_check")
__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
int check_assert_with_return(void *ctx)
{
bpf_assert_with(!ctx, 64);
return 0;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"
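/* Throwing and non-throwing fentry/fexit/fmod_ret programs and freplace
 * extensions, attached by the userspace tests to exercise exceptions in the
 * various attachment scenarios.
 */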
SEC("?fentry")
int pfentry(void *ctx)
{
return 0;
}
SEC("?fentry")
int throwing_fentry(void *ctx)
{
bpf_throw(0);
return 0;
}
__noinline int exception_cb(u64 cookie)
{
return cookie + 64;
}
SEC("?freplace")
int extension(struct __sk_buff *ctx)
{
return 0;
}
SEC("?freplace")
__exception_cb(exception_cb)
int throwing_exception_cb_extension(u64 cookie)
{
bpf_throw(32);
return 0;
}
SEC("?freplace")
__exception_cb(exception_cb)
int throwing_extension(struct __sk_buff *ctx)
{
bpf_throw(64);
return 0;
}
SEC("?fexit")
int pfexit(void *ctx)
{
return 0;
}
SEC("?fexit")
int throwing_fexit(void *ctx)
{
bpf_throw(0);
return 0;
}
SEC("?fmod_ret")
int pfmod_ret(void *ctx)
{
return 0;
}
SEC("?fmod_ret")
int throwing_fmod_ret(void *ctx)
{
bpf_throw(0);
return 0;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
extern void bpf_rcu_read_lock(void) __ksym;
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
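/* Negative tests: programs marked __failure below are expected to be rejected
 * by the verifier with the quoted __msg(); reject_exception_cb_type_4 is the
 * lone __success control case.
 */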
struct foo {
struct bpf_rb_node node;
};
struct hmap_elem {
struct bpf_timer timer;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 64);
__type(key, int);
__type(value, struct hmap_elem);
} hmap SEC(".maps");
private(A) struct bpf_spin_lock lock;
private(A) struct bpf_rb_root rbtree __contains(foo, node);
__noinline void *exception_cb_bad_ret_type(u64 cookie)
{
return NULL;
}
__noinline int exception_cb_bad_arg_0(void)
{
return 0;
}
__noinline int exception_cb_bad_arg_2(int a, int b)
{
return 0;
}
__noinline int exception_cb_ok_arg_small(int a)
{
return 0;
}
SEC("?tc")
__exception_cb(exception_cb_bad_ret_type)
__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
int reject_exception_cb_type_1(struct __sk_buff *ctx)
{
bpf_throw(0);
return 0;
}
SEC("?tc")
__exception_cb(exception_cb_bad_arg_0)
__failure __msg("exception cb only supports single integer argument")
int reject_exception_cb_type_2(struct __sk_buff *ctx)
{
bpf_throw(0);
return 0;
}
SEC("?tc")
__exception_cb(exception_cb_bad_arg_2)
__failure __msg("exception cb only supports single integer argument")
int reject_exception_cb_type_3(struct __sk_buff *ctx)
{
bpf_throw(0);
return 0;
}
SEC("?tc")
__exception_cb(exception_cb_ok_arg_small)
__success
int reject_exception_cb_type_4(struct __sk_buff *ctx)
{
bpf_throw(0);
return 0;
}
__noinline
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
bpf_throw(0);
return 0;
}
SEC("?tc")
__failure __msg("cannot be called from callback subprog")
int reject_async_callback_throw(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
elem = bpf_map_lookup_elem(&hmap, &(int){0});
if (!elem)
return 0;
return bpf_timer_set_callback(&elem->timer, timer_cb);
}
__noinline static int subprog_lock(struct __sk_buff *ctx)
{
volatile int ret = 0;
bpf_spin_lock(&lock);
if (ctx->len)
bpf_throw(0);
return ret;
}
SEC("?tc")
__failure __msg("function calls are not allowed while holding a lock")
int reject_with_lock(void *ctx)
{
bpf_spin_lock(&lock);
bpf_throw(0);
return 0;
}
SEC("?tc")
__failure __msg("function calls are not allowed while holding a lock")
int reject_subprog_with_lock(void *ctx)
{
return subprog_lock(ctx);
}
SEC("?tc")
__failure __msg("bpf_rcu_read_unlock is missing")
int reject_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
bpf_throw(0);
return 0;
}
__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
if (ctx->len)
bpf_throw(0);
return 0;
}
SEC("?tc")
__failure __msg("bpf_rcu_read_unlock is missing")
int reject_subprog_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
return throwing_subprog(ctx);
}
static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
{
bpf_throw(0);
return true;
}
SEC("?tc")
__failure __msg("function calls are not allowed while holding a lock")
int reject_with_rbtree_add_throw(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&lock);
bpf_rbtree_add(&rbtree, &f->node, rbless);
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference")
int reject_with_reference(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_throw(0);
return 0;
}
__noinline static int subprog_ref(struct __sk_buff *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_throw(0);
return 0;
}
__noinline static int subprog_cb_ref(u32 i, void *ctx)
{
bpf_throw(0);
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference")
int reject_with_cb_reference(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_loop(5, subprog_cb_ref, NULL, 0);
return 0;
}
SEC("?tc")
__failure __msg("cannot be called from callback")
int reject_with_cb(void *ctx)
{
bpf_loop(5, subprog_cb_ref, NULL, 0);
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference")
int reject_with_subprog_reference(void *ctx)
{
return subprog_ref(ctx) + 1;
}
__noinline int throwing_exception_cb(u64 c)
{
bpf_throw(0);
return c;
}
__noinline int exception_cb1(u64 c)
{
return c;
}
__noinline int exception_cb2(u64 c)
{
return c;
}
static __noinline int static_func(struct __sk_buff *ctx)
{
return exception_cb1(ctx->tstamp);
}
__noinline int global_func(struct __sk_buff *ctx)
{
return exception_cb1(ctx->tstamp);
}
SEC("?tc")
__exception_cb(throwing_exception_cb)
__failure __msg("cannot be called from callback subprog")
int reject_throwing_exception_cb(struct __sk_buff *ctx)
{
return 0;
}
SEC("?tc")
__exception_cb(exception_cb1)
__failure __msg("cannot call exception cb directly")
int reject_exception_cb_call_global_func(struct __sk_buff *ctx)
{
return global_func(ctx);
}
SEC("?tc")
__exception_cb(exception_cb1)
__failure __msg("cannot call exception cb directly")
int reject_exception_cb_call_static_func(struct __sk_buff *ctx)
{
return static_func(ctx);
}
SEC("?tc")
__exception_cb(exception_cb1)
__exception_cb(exception_cb2)
__failure __msg("multiple exception callback tags for main subprog")
int reject_multiple_exception_cb(struct __sk_buff *ctx)
{
bpf_throw(0);
return 16;
}
__noinline int exception_cb_bad_ret(u64 c)
{
return c;
}
SEC("?fentry/bpf_check")
__exception_cb(exception_cb_bad_ret)
__failure __msg("At program exit the register R0 has unknown scalar value should")
int reject_set_exception_cb_bad_ret1(void *ctx)
{
return 0;
}
SEC("?fentry/bpf_check")
__failure __msg("At program exit the register R0 has value (0x40; 0x0) should")
int reject_set_exception_cb_bad_ret2(void *ctx)
{
bpf_throw(64);
return 0;
}
__noinline static int loop_cb1(u32 index, int *ctx)
{
bpf_throw(0);
return 0;
}
__noinline static int loop_cb2(u32 index, int *ctx)
{
bpf_throw(0);
return 0;
}
SEC("?tc")
__failure __msg("cannot be called from callback")
int reject_exception_throw_cb(struct __sk_buff *ctx)
{
bpf_loop(5, loop_cb1, NULL, 0);
return 0;
}
SEC("?tc")
__failure __msg("cannot be called from callback")
int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
{
if (ctx->protocol)
bpf_loop(5, loop_cb1, NULL, 0);
else
bpf_loop(5, loop_cb2, NULL, 0);
return 0;
}
char _license[] SEC("license") = "GPL";