Commit e9936076 authored by Alexei Starovoitov

Merge branch 'First set of verifier/*.c migrated to inline assembly'

Eduard Zingerman says:

====================

This is a follow-up to RFC [1]. It migrates a first batch of 38
verifier/*.c tests to inline assembly, using ./test_progs for actual
execution. The migration is done by a Python script (see [2]).

Each migrated verifier/xxx.c file is mapped to progs/verifier_xxx.c
plus an entry in prog_tests/verifier.c, one patch per file.
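
For example, verifier/and.c maps to progs/verifier_and.c, and
prog_tests/verifier.c gets a one-line entry (RUN is a small convenience
macro defined in that file, see the diff below):

  void test_verifier_and(void) { RUN(verifier_and); }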

A few patches at the beginning of the patch-set extend test_loader
with the necessary functionality, mainly:
- support for test execution in unprivileged mode (illustrated below);
- support for test runs of test programs.
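
Unprivileged mode is requested via the *_unpriv attribute variants, and
a single test can expect different outcomes in the two modes. E.g. the
following attributes (taken from the migrated and.c below) expect the
program to load and return 0 in privileged mode, but to be rejected
with a message in unprivileged mode:

  __success __failure_unpriv __msg_unpriv("R1 !read_ok")
  __retval(0)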

Migrated tests can be selected for execution using the following filter:

  ./test_progs -a verifier_*

An example of the migrated test:

  SEC("xdp")
  __description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
  __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
  __naked void end_corner_case_good_access_1(void)
  {
          asm volatile ("                                 \
          r2 = *(u32*)(r1 + %[xdp_md_data]);              \
          r3 = *(u32*)(r1 + %[xdp_md_data_end]);          \
          r1 = r2;                                        \
          r1 += 8;                                        \
          if r1 > r3 goto l0_%=;                          \
          r0 = *(u64*)(r1 - 8);                           \
  l0_%=:  r0 = 0;                                         \
          exit;                                           \
  "       :
          : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
            __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
          : __clobber_all);
  }
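
The %[...] operands are bound using helper macros from
progs/bpf_misc.h (also part of this diff), e.g.:

  #define __imm_const(name, expr) [name]"i"(expr)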

Changes compared to RFC:
- test_loader.c is extended to support test program runs;
- capabilities handling now matches the behavior of test_verifier;
- BPF_ST_MEM instructions are automatically replaced by BPF_STX_MEM
  instructions to overcome current clang limitations (see the example
  after this list);
- test styling updated according to RFC feedback;
- 38 migrated files are included instead of 1.
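
To illustrate the BPF_ST_MEM rewrite: a store of an immediate such as

  BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),

as at the start of and.c's first test, has no spelling in current
clang BPF inline assembly, so the tool emits a register move followed
by an equivalent BPF_STX_MEM store (see verifier_and.c below):

  r1 = 0;
  *(u64*)(r10 - 8) = r1;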

I used the following means for testing:
- the migration tool itself has a set of self-tests;
- migrated tests are passing;
- I manually compared each old/new file side by side.

While doing the side-by-side comparison I noted a few defects in the
original tests:
- and.c:
  - One of the jump targets is off by one;
  - BPF_ST_MEM wrong OFF/IMM ordering (see the note below);
- array_access.c:
  - BPF_ST_MEM wrong OFF/IMM ordering;
- value_or_null.c:
  - BPF_ST_MEM wrong OFF/IMM ordering.
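
Here "wrong OFF/IMM ordering" means the offset and immediate arguments
were swapped. A minimal sketch with hypothetical values, using the
BPF_ST_MEM(SIZE, DST, OFF, IMM) macro:

  BPF_ST_MEM(BPF_DW, BPF_REG_10, 2, -8),  /* swapped: writes -8 at fp+2 */
  BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 2),  /* intended: writes 2 at fp-8 */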

These defects will be addressed separately.

[1] RFC
    https://lore.kernel.org/bpf/20230123145148.2791939-1-eddyz87@gmail.com/
[2] Migration tool
    https://github.com/eddyz87/verifier-tests-migrator
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 496f4f1b ffb515c9
tools/testing/selftests/bpf/Makefile
@@ -231,8 +231,9 @@ TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
 $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)

-CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
 TESTING_HELPERS := $(OUTPUT)/testing_helpers.o
+CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
+UNPRIV_HELPERS := $(OUTPUT)/unpriv_helpers.o
 TRACE_HELPERS := $(OUTPUT)/trace_helpers.o
 JSON_WRITER := $(OUTPUT)/json_writer.o
 CAP_HELPERS := $(OUTPUT)/cap_helpers.o
@@ -252,7 +253,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
 $(OUTPUT)/xdping: $(TESTING_HELPERS)
 $(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
 $(OUTPUT)/test_maps: $(TESTING_HELPERS)
-$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
+$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) $(UNPRIV_HELPERS)
 $(OUTPUT)/xsk.o: $(BPFOBJ)
@@ -560,8 +561,9 @@ TRUNNER_BPF_PROGS_DIR := progs
 TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
                          network_helpers.c testing_helpers.c \
                          btf_helpers.c flow_dissector_load.h \
-                         cap_helpers.c test_loader.c xsk.c disasm.c \
-                         json_writer.c
+                         cap_helpers.c test_loader.c xsk.c disasm.c \
+                         json_writer.c unpriv_helpers.c
 TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
                        $(OUTPUT)/liburandom_read.so \
                        $(OUTPUT)/xdp_synproxy \
...
// SPDX-License-Identifier: GPL-2.0-only
#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
// SPDX-License-Identifier: GPL-2.0-only
#include <test_progs.h>
#include "cap_helpers.h"
#include "verifier_and.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bounds_deduction.skel.h"
#include "verifier_bounds_mix_sign_unsign.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
#include "verifier_cgroup_skb.skel.h"
#include "verifier_cgroup_storage.skel.h"
#include "verifier_const_or.skel.h"
#include "verifier_ctx_sk_msg.skel.h"
#include "verifier_direct_stack_access_wraparound.skel.h"
#include "verifier_div0.skel.h"
#include "verifier_div_overflow.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
#include "verifier_helper_restricted.skel.h"
#include "verifier_helper_value_access.skel.h"
#include "verifier_int_ptr.skel.h"
#include "verifier_ld_ind.skel.h"
#include "verifier_leak_ptr.skel.h"
#include "verifier_map_ptr.skel.h"
#include "verifier_map_ret_val.skel.h"
#include "verifier_masking.skel.h"
#include "verifier_meta_access.skel.h"
#include "verifier_raw_stack.skel.h"
#include "verifier_raw_tp_writable.skel.h"
#include "verifier_ringbuf.skel.h"
#include "verifier_spill_fill.skel.h"
#include "verifier_stack_ptr.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_value_adj_spill.skel.h"
#include "verifier_value.skel.h"
#include "verifier_value_or_null.skel.h"
#include "verifier_var_off.skel.h"
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"
__maybe_unused
static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
{
struct test_loader tester = {};
__u64 old_caps;
int err;
/* test_verifier tests are executed w/o CAP_SYS_ADMIN, do the same here */
err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps);
if (err) {
PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(err));
return;
}
test_loader__run_subtests(&tester, skel_name, elf_bytes_factory);
test_loader_fini(&tester);
err = cap_enable_effective(old_caps, NULL);
if (err)
PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err));
}
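/* RUN(skel) expands to a run_tests_aux() call, passing the skeleton's name
 * and its <skel>__elf_bytes() ELF accessor generated by bpftool gen skeleton.
 */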
#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes)
void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_array_access(void) { RUN(verifier_array_access); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); }
void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
void test_verifier_const_or(void) { RUN(verifier_const_or); }
void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); }
void test_verifier_div0(void) { RUN(verifier_div0); }
void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); }
void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); }
void test_verifier_masking(void) { RUN(verifier_masking); }
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); }
void test_verifier_value(void) { RUN(verifier_value); }
void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
void test_verifier_var_off(void) { RUN(verifier_var_off); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -5,12 +5,42 @@
/* This set of attributes controls behavior of the
* test_loader.c:test_loader__run_subtests().
*
* The test_loader sequentially loads each program in a skeleton.
 * Programs can be loaded in privileged and unprivileged modes.
 * - __success, __failure, __msg imply privileged mode;
 * - __success_unpriv, __failure_unpriv, __msg_unpriv imply
 *   unprivileged mode.
 * If a combination of privileged and unprivileged attributes is present,
 * both modes are used. If none are present, privileged mode is implied.
*
* See test_loader.c:drop_capabilities() for exact set of capabilities
* that differ between privileged and unprivileged modes.
*
* For test filtering purposes the name of the program loaded in
* unprivileged mode is derived from the usual program name by adding
* `@unpriv' suffix.
*
* __msg Message expected to be found in the verifier log.
* Multiple __msg attributes could be specified.
* __msg_unpriv Same as __msg but for unprivileged mode.
*
* __success Expect program load success in privileged mode.
* __success_unpriv Expect program load success in unprivileged mode.
*
* __failure Expect program load failure in privileged mode.
* __failure_unpriv Expect program load failure in unprivileged mode.
*
* __retval Execute the program using BPF_PROG_TEST_RUN command,
* expect return value to match passed parameter:
* - a decimal number
 * - a hexadecimal number, when it starts with 0x
* - literal INT_MIN
* - literal POINTER_VALUE (see definition below)
* - literal TEST_DATA_LEN (see definition below)
* __retval_unpriv Same, but load program in unprivileged mode.
*
* __description Text to be used instead of a program name for display
* and filtering purposes.
*
* __log_level Log level to use for the program, numeric value expected.
*
@@ -27,16 +57,28 @@
#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
#define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val)))
#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val)))
/* Convenience macro for use with 'asm volatile' blocks */
#define __naked __attribute__((naked))
#define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
#define __imm(name) [name]"i"(name)
#define __imm_const(name, expr) [name]"i"(expr)
#define __imm_addr(name) [name]"i"(&name)
#define __imm_ptr(name) [name]"p"(&name)
#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
/* Magic constants used with __retval() */
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
...
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/and.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("invalid and of negative number")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_and_of_negative_number(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= -4; \
r1 <<= 2; \
r0 += r1; \
l0_%=: r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid range check")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_range_check(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
r9 = 1; \
w1 %%= 2; \
w1 += 1; \
w9 &= w1; \
w9 += 1; \
w9 >>= 1; \
w3 = 1; \
w3 -= w9; \
w3 *= 0x10000000; \
r0 += r3; \
*(u32*)(r0 + 0) = r3; \
l0_%=: r0 = r0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("check known subreg with unknown reg")
__success __failure_unpriv __msg_unpriv("R1 !read_ok")
__retval(0)
__naked void known_subreg_with_unknown_reg(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r0 <<= 32; \
r0 += 1; \
r0 &= 0xFFFF1234; \
/* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */\
if w0 < 1 goto l0_%=; \
r1 = *(u32*)(r1 + 512); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
[diff collapsed: progs/verifier_array_access.c]
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("socket")
__description("stack out of bounds")
__failure __msg("invalid write to stack")
__failure_unpriv
__naked void stack_out_of_bounds(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 + 8) = r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("uninitialized stack1")
__failure __msg("invalid indirect read from stack")
__failure_unpriv
__naked void uninitialized_stack1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("uninitialized stack2")
__failure __msg("invalid read from stack")
__failure_unpriv
__naked void uninitialized_stack2(void)
{
asm volatile (" \
r2 = r10; \
r0 = *(u64*)(r2 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("invalid fp arithmetic")
__failure __msg("R1 subtraction from stack pointer")
__failure_unpriv
__naked void invalid_fp_arithmetic(void)
{
/* If this gets ever changed, make sure JITs can deal with it. */
asm volatile (" \
r0 = 0; \
r1 = r10; \
r1 -= 8; \
*(u64*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("non-invalid fp arithmetic")
__success __success_unpriv __retval(0)
__naked void non_invalid_fp_arithmetic(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("misaligned read from stack")
__failure __msg("misaligned stack access")
__failure_unpriv
__naked void misaligned_read_from_stack(void)
{
asm volatile (" \
r2 = r10; \
r0 = *(u64*)(r2 - 4); \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bounds_deduction.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("check deducing bounds from const, 1")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_1(void)
{
asm volatile (" \
r0 = 1; \
if r0 s>= 1 goto l0_%=; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 2")
__success __failure_unpriv
__msg_unpriv("R1 has pointer with unsupported alu operation")
__retval(1)
__naked void deducing_bounds_from_const_2(void)
{
asm volatile (" \
r0 = 1; \
if r0 s>= 1 goto l0_%=; \
exit; \
l0_%=: if r0 s<= 1 goto l1_%=; \
exit; \
l1_%=: r1 -= r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 3")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_3(void)
{
asm volatile (" \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 4")
__success __failure_unpriv
__msg_unpriv("R6 has pointer with unsupported alu operation")
__retval(0)
__naked void deducing_bounds_from_const_4(void)
{
asm volatile (" \
r6 = r1; \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
exit; \
l0_%=: if r0 s>= 0 goto l1_%=; \
exit; \
l1_%=: r6 -= r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 5")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_5(void)
{
asm volatile (" \
r0 = 0; \
if r0 s>= 1 goto l0_%=; \
r0 -= r1; \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 6")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_6(void)
{
asm volatile (" \
r0 = 0; \
if r0 s>= 0 goto l0_%=; \
exit; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 7")
__failure __msg("dereference of modified ctx ptr")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void deducing_bounds_from_const_7(void)
{
asm volatile (" \
r0 = %[__imm_0]; \
if r0 s>= 0 goto l0_%=; \
l0_%=: r1 -= r0; \
r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__imm_0, ~0),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 8")
__failure __msg("negative offset ctx ptr R1 off=-1 disallowed")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void deducing_bounds_from_const_8(void)
{
asm volatile (" \
r0 = %[__imm_0]; \
if r0 s>= 0 goto l0_%=; \
r1 += r0; \
l0_%=: r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__imm_0, ~0),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 9")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_9(void)
{
asm volatile (" \
r0 = 0; \
if r0 s>= 0 goto l0_%=; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 10")
__failure
__msg("math between ctx pointer and register with unbounded min value is not allowed")
__failure_unpriv
__naked void deducing_bounds_from_const_10(void)
{
asm volatile (" \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
l0_%=: /* Marks reg as unknown. */ \
r0 = -r0; \
r0 -= r1; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cfg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("unreachable")
__failure __msg("unreachable")
__failure_unpriv
__naked void unreachable(void)
{
asm volatile (" \
exit; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unreachable2")
__failure __msg("unreachable")
__failure_unpriv
__naked void unreachable2(void)
{
asm volatile (" \
goto l0_%=; \
goto l0_%=; \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("out of range jump")
__failure __msg("jump out of range")
__failure_unpriv
__naked void out_of_range_jump(void)
{
asm volatile (" \
goto l0_%=; \
exit; \
l0_%=: \
" ::: __clobber_all);
}
SEC("socket")
__description("out of range jump2")
__failure __msg("jump out of range")
__failure_unpriv
__naked void out_of_range_jump2(void)
{
asm volatile (" \
goto -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("loop (back-edge)")
__failure __msg("unreachable insn 1")
__msg_unpriv("back-edge")
__naked void loop_back_edge(void)
{
asm volatile (" \
l0_%=: goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("loop2 (back-edge)")
__failure __msg("unreachable insn 4")
__msg_unpriv("back-edge")
__naked void loop2_back_edge(void)
{
asm volatile (" \
l0_%=: r1 = r0; \
r2 = r0; \
r3 = r0; \
goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("conditional loop")
__failure __msg("infinite loop detected")
__msg_unpriv("back-edge")
__naked void conditional_loop(void)
{
asm volatile (" \
r0 = r1; \
l0_%=: r2 = r0; \
r3 = r0; \
if r1 == 0 goto l0_%=; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test1")
__failure __msg("R0 has value (0x0; 0xffffffff)")
__naked void with_invalid_return_code_test1(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test2")
__success
__naked void with_invalid_return_code_test2(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
r0 &= 1; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test3")
__failure __msg("R0 has value (0x0; 0x3)")
__naked void with_invalid_return_code_test3(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
r0 &= 3; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test4")
__success
__naked void with_invalid_return_code_test4(void)
{
asm volatile (" \
r0 = 1; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test5")
__failure __msg("R0 has value (0x2; 0x0)")
__naked void with_invalid_return_code_test5(void)
{
asm volatile (" \
r0 = 2; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test6")
__failure __msg("R0 is not a known value (ctx)")
__naked void with_invalid_return_code_test6(void)
{
asm volatile (" \
r0 = r1; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test7")
__failure __msg("R0 has unknown scalar value")
__naked void with_invalid_return_code_test7(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
r2 = *(u32*)(r1 + 4); \
r0 *= r2; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("cgroup/skb")
__description("direct packet read test#1 for CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=76 size=4")
__retval(0)
__naked void test_1_for_cgroup_skb(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r4 = *(u32*)(r1 + %[__sk_buff_len]); \
r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]); \
r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
*(u32*)(r1 + %[__sk_buff_mark]) = r6; \
r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]); \
r8 = *(u32*)(r1 + %[__sk_buff_protocol]); \
r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
__imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
__imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
__imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
: __clobber_all);
}
SEC("cgroup/skb")
__description("direct packet read test#2 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_2_for_cgroup_skb(void)
{
asm volatile (" \
r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]); \
r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]); \
r6 = *(u32*)(r1 + %[__sk_buff_priority]); \
*(u32*)(r1 + %[__sk_buff_priority]) = r6; \
r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\
r8 = *(u32*)(r1 + %[__sk_buff_tc_index]); \
r9 = *(u32*)(r1 + %[__sk_buff_hash]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
__imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
__imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
__imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
__imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
: __clobber_all);
}
SEC("cgroup/skb")
__description("direct packet read test#3 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_3_for_cgroup_skb(void)
{
asm volatile (" \
r4 = *(u32*)(r1 + %[__sk_buff_cb_0]); \
r5 = *(u32*)(r1 + %[__sk_buff_cb_1]); \
r6 = *(u32*)(r1 + %[__sk_buff_cb_2]); \
r7 = *(u32*)(r1 + %[__sk_buff_cb_3]); \
r8 = *(u32*)(r1 + %[__sk_buff_cb_4]); \
r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
*(u32*)(r1 + %[__sk_buff_cb_0]) = r4; \
*(u32*)(r1 + %[__sk_buff_cb_1]) = r5; \
*(u32*)(r1 + %[__sk_buff_cb_2]) = r6; \
*(u32*)(r1 + %[__sk_buff_cb_3]) = r7; \
*(u32*)(r1 + %[__sk_buff_cb_4]) = r8; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
__imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
__imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
__imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
__imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
__imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
: __clobber_all);
}
SEC("cgroup/skb")
__description("direct packet read test#4 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_4_for_cgroup_skb(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_family]); \
r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]); \
r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]); \
r7 = *(u32*)(r1 + %[__sk_buff_remote_port]); \
r8 = *(u32*)(r1 + %[__sk_buff_local_port]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
__imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
__imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
__imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
__imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
__imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
__imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
__imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
__imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
__imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
__imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
__imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
__imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid access of tc_classid for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid access of data_meta for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void data_meta_for_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_data_meta]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid access of flow_keys for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void flow_keys_for_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid write access to napi_id for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void napi_id_for_cgroup_skb(void)
{
asm volatile (" \
r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
*(u32*)(r1 + %[__sk_buff_napi_id]) = r9; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
: __clobber_all);
}
SEC("cgroup/skb")
__description("write tstamp from CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=152 size=8")
__retval(0)
__naked void write_tstamp_from_cgroup_skb(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r1 + %[__sk_buff_tstamp]) = r0; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
: __clobber_all);
}
SEC("cgroup/skb")
__description("read tstamp from CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void read_tstamp_from_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_storage.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__uint(max_entries, 0);
__type(key, struct bpf_cgroup_storage_key);
__type(value, char[TEST_DATA_LEN]);
} cgroup_storage SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__uint(max_entries, 0);
__type(key, struct bpf_cgroup_storage_key);
__type(value, char[64]);
} percpu_cgroup_storage SEC(".maps");
SEC("cgroup/skb")
__description("valid cgroup storage access")
__success __success_unpriv __retval(0)
__naked void valid_cgroup_storage_access(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 1")
__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
__failure_unpriv
__naked void invalid_cgroup_storage_access_1(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[map_hash_8b] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 2")
__failure __msg("fd 1 is not pointing to valid bpf_map")
__failure_unpriv
__naked void invalid_cgroup_storage_access_2(void)
{
asm volatile (" \
r2 = 0; \
.8byte %[ld_map_fd]; \
.8byte 0; \
call %[bpf_get_local_storage]; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 3")
__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
__failure_unpriv
__naked void invalid_cgroup_storage_access_3(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 256); \
r1 += 1; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 4")
__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_cgroup_storage_access_4(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 - 2); \
r0 = r1; \
r1 += 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 5")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__failure_unpriv
__naked void invalid_cgroup_storage_access_5(void)
{
asm volatile (" \
r2 = 7; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 6")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__msg_unpriv("R2 leaks addr into helper function")
__naked void invalid_cgroup_storage_access_6(void)
{
asm volatile (" \
r2 = r1; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("valid per-cpu cgroup storage access")
__success __success_unpriv __retval(0)
__naked void per_cpu_cgroup_storage_access(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 1")
__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
__failure_unpriv
__naked void cpu_cgroup_storage_access_1(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[map_hash_8b] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 2")
__failure __msg("fd 1 is not pointing to valid bpf_map")
__failure_unpriv
__naked void cpu_cgroup_storage_access_2(void)
{
asm volatile (" \
r2 = 0; \
.8byte %[ld_map_fd]; \
.8byte 0; \
call %[bpf_get_local_storage]; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 3")
__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
__failure_unpriv
__naked void cpu_cgroup_storage_access_3(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 256); \
r1 += 1; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 4")
__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void cpu_cgroup_storage_access_4(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 - 2); \
r0 = r1; \
r1 += 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 5")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__failure_unpriv
__naked void cpu_cgroup_storage_access_5(void)
{
asm volatile (" \
r2 = 7; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 6")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__msg_unpriv("R2 leaks addr into helper function")
__naked void cpu_cgroup_storage_access_6(void)
{
asm volatile (" \
r2 = r1; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/const_or.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("tracepoint")
__description("constant register |= constant should keep constant type")
__success
__naked void constant_should_keep_constant_type(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r2 |= 13; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("constant register |= constant should not bypass stack boundary checks")
__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_1(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r2 |= 24; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("constant register |= constant register should keep constant type")
__success
__naked void register_should_keep_constant_type(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r4 = 13; \
r2 |= r4; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("constant register |= constant register should not bypass stack boundary checks")
__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_2(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r4 = 24; \
r2 |= r4; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("sk_msg")
__description("valid access family in SK_MSG")
__success
__naked void access_family_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_family]); \
exit; \
" :
: __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access remote_ip4 in SK_MSG")
__success
__naked void remote_ip4_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access local_ip4 in SK_MSG")
__success
__naked void local_ip4_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \
exit; \
" :
: __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access remote_port in SK_MSG")
__success
__naked void remote_port_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access local_port in SK_MSG")
__success
__naked void local_port_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \
exit; \
" :
: __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port))
: __clobber_all);
}
SEC("sk_skb")
__description("valid access remote_ip6 in SK_MSG")
__success
__naked void remote_ip6_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])),
__imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])),
__imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])),
__imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3]))
: __clobber_all);
}
SEC("sk_skb")
__description("valid access local_ip6 in SK_MSG")
__success
__naked void local_ip6_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \
exit; \
" :
: __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])),
__imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])),
__imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])),
__imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3]))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access size in SK_MSG")
__success
__naked void access_size_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_size]); \
exit; \
" :
: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
: __clobber_all);
}
SEC("sk_msg")
__description("invalid 64B read of size in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void of_size_in_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_size]); \
exit; \
" :
: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
: __clobber_all);
}
SEC("sk_msg")
__description("invalid read past end of SK_MSG")
__failure __msg("invalid bpf_context access")
__naked void past_end_of_sk_msg(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__imm_0]); \
exit; \
" :
: __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4)
: __clobber_all);
}
SEC("sk_msg")
__description("invalid read offset in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void read_offset_in_sk_msg(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__imm_0]); \
exit; \
" :
: __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1)
: __clobber_all);
}
SEC("sk_msg")
__description("direct packet read for SK_MSG")
__success
__naked void packet_read_for_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
SEC("sk_msg")
__description("direct packet write for SK_MSG")
__success
__naked void packet_write_for_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
SEC("sk_msg")
__description("overlapping checks for direct packet access SK_MSG")
__success
__naked void direct_packet_access_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r1 = r2; \
r1 += 6; \
if r1 > r3 goto l0_%=; \
r0 = *(u16*)(r2 + 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("direct stack access with 32-bit wraparound. test1")
__failure __msg("fp pointer and 2147483647")
__failure_unpriv
__naked void with_32_bit_wraparound_test1(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0x7fffffff; \
r1 += 0x7fffffff; \
w0 = 0; \
*(u8*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("direct stack access with 32-bit wraparound. test2")
__failure __msg("fp pointer and 1073741823")
__failure_unpriv
__naked void with_32_bit_wraparound_test2(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0x3fffffff; \
r1 += 0x3fffffff; \
w0 = 0; \
*(u8*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("direct stack access with 32-bit wraparound. test3")
__failure __msg("fp pointer offset 1073741822")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void with_32_bit_wraparound_test3(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0x1fffffff; \
r1 += 0x1fffffff; \
w0 = 0; \
*(u8*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/div0.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("DIV32 by 0, zero check 1")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_1_1(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
w2 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("DIV32 by 0, zero check 2")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_2_1(void)
{
asm volatile (" \
w0 = 42; \
r1 = 0xffffffff00000000LL ll; \
w2 = 1; \
w2 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("DIV64 by 0, zero check")
__success __success_unpriv __retval(42)
__naked void div64_by_0_zero_check(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
r2 /= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOD32 by 0, zero check 1")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_1_2(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
w2 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOD32 by 0, zero check 2")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_2_2(void)
{
asm volatile (" \
w0 = 42; \
r1 = 0xffffffff00000000LL ll; \
w2 = 1; \
w2 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOD64 by 0, zero check")
__success __success_unpriv __retval(42)
__naked void mod64_by_0_zero_check(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
r2 %%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV32 by 0, zero check ok, cls")
__success __retval(8)
__naked void _0_zero_check_ok_cls_1(void)
{
asm volatile (" \
w0 = 42; \
w1 = 2; \
w2 = 16; \
w2 /= w1; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV32 by 0, zero check 1, cls")
__success __retval(0)
__naked void _0_zero_check_1_cls_1(void)
{
asm volatile (" \
w1 = 0; \
w0 = 1; \
w0 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV32 by 0, zero check 2, cls")
__success __retval(0)
__naked void _0_zero_check_2_cls_1(void)
{
asm volatile (" \
r1 = 0xffffffff00000000LL ll; \
w0 = 1; \
w0 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV64 by 0, zero check, cls")
__success __retval(0)
__naked void by_0_zero_check_cls(void)
{
asm volatile (" \
w1 = 0; \
w0 = 1; \
r0 /= r1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD32 by 0, zero check ok, cls")
__success __retval(2)
__naked void _0_zero_check_ok_cls_2(void)
{
asm volatile (" \
w0 = 42; \
w1 = 3; \
w2 = 5; \
w2 %%= w1; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD32 by 0, zero check 1, cls")
__success __retval(1)
__naked void _0_zero_check_1_cls_2(void)
{
asm volatile (" \
w1 = 0; \
w0 = 1; \
w0 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD32 by 0, zero check 2, cls")
__success __retval(1)
__naked void _0_zero_check_2_cls_2(void)
{
asm volatile (" \
r1 = 0xffffffff00000000LL ll; \
w0 = 1; \
w0 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD64 by 0, zero check 1, cls")
__success __retval(2)
__naked void _0_zero_check_1_cls_3(void)
{
asm volatile (" \
w1 = 0; \
w0 = 2; \
r0 %%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD64 by 0, zero check 2, cls")
__success __retval(-1)
__naked void _0_zero_check_2_cls_3(void)
{
asm volatile (" \
w1 = 0; \
w0 = -1; \
r0 %%= r1; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/div_overflow.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <limits.h>
#include "bpf_misc.h"
/* Just make sure that JITs use udiv/umod, as otherwise we get
 * an exception from INT_MIN/-1 overflow, similar to the div
 * by zero case.
 */
SEC("tc")
__description("DIV32 overflow, check 1")
__success __retval(0)
__naked void div32_overflow_check_1(void)
{
asm volatile (" \
w1 = -1; \
w0 = %[int_min]; \
w0 /= w1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("DIV32 overflow, check 2")
__success __retval(0)
__naked void div32_overflow_check_2(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 /= -1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("DIV64 overflow, check 1")
__success __retval(0)
__naked void div64_overflow_check_1(void)
{
asm volatile (" \
r1 = -1; \
r2 = %[llong_min] ll; \
r2 /= r1; \
w0 = 0; \
if r0 == r2 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
SEC("tc")
__description("DIV64 overflow, check 2")
__success __retval(0)
__naked void div64_overflow_check_2(void)
{
asm volatile (" \
r1 = %[llong_min] ll; \
r1 /= -1; \
w0 = 0; \
if r0 == r1 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD32 overflow, check 1")
__success __retval(INT_MIN)
__naked void mod32_overflow_check_1(void)
{
asm volatile (" \
w1 = -1; \
w0 = %[int_min]; \
w0 %%= w1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD32 overflow, check 2")
__success __retval(INT_MIN)
__naked void mod32_overflow_check_2(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 %%= -1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD64 overflow, check 1")
__success __retval(1)
__naked void mod64_overflow_check_1(void)
{
asm volatile (" \
r1 = -1; \
r2 = %[llong_min] ll; \
r3 = r2; \
r2 %%= r1; \
w0 = 0; \
if r3 != r2 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD64 overflow, check 2")
__success __retval(1)
__naked void mod64_overflow_check_2(void)
{
asm volatile (" \
r2 = %[llong_min] ll; \
r3 = r2; \
r2 %%= -1; \
w0 = 0; \
if r3 != r2 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_restricted.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct val {
int cnt;
struct bpf_spin_lock l;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct val);
} map_spin_lock SEC(".maps");
struct timer {
struct bpf_timer t;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct timer);
} map_timer SEC(".maps");
SEC("kprobe")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void in_bpf_prog_type_kprobe_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("tracepoint")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void in_bpf_prog_type_tracepoint_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("perf_event")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void bpf_prog_type_perf_event_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("raw_tracepoint")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void bpf_prog_type_raw_tracepoint_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("kprobe")
__description("bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void in_bpf_prog_type_kprobe_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("perf_event")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void bpf_prog_type_perf_event_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("tracepoint")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void in_bpf_prog_type_tracepoint_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("raw_tracepoint")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void bpf_prog_type_raw_tracepoint_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
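/* The bpf_spin_lock tests below use the same lookup pattern, ending in
 * a bpf_spin_lock() call on the value's embedded lock. Roughly, in
 * plain BPF C (sketch, illustration only):
 *
 *	int key = 0;
 *	struct val *v = bpf_map_lookup_elem(&map_spin_lock, &key);
 *	if (v)
 *		bpf_spin_lock(&v->l);
 */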
SEC("kprobe")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void in_bpf_prog_type_kprobe_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("tracepoint")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void in_bpf_prog_type_tracepoint_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("perf_event")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void bpf_prog_type_perf_event_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("raw_tracepoint")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void bpf_prog_type_raw_tracepoint_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/int_ptr.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG uninitialized")
__failure __msg("invalid indirect read from stack R4 off -16+0 size 8")
__naked void arg_ptr_to_long_uninitialized(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -8; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
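/* Here r4 points at fp-16, a slot that was never written: the verifier
 * rejects the 8-byte read of the "res" argument outright. Compare with
 * the half-initialized and fully initialized variants below. */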
SEC("socket")
__description("ARG_PTR_TO_LONG half-uninitialized")
/* in privileged mode, reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8")
__retval(0)
__naked void ptr_to_long_half_uninitialized(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -8; \
*(u32*)(r7 + 0) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG misaligned")
__failure __msg("misaligned stack access off (0x0; 0x0)+-20+0 size 8")
__naked void arg_ptr_to_long_misaligned(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -12; \
r0 = 0; \
*(u32*)(r7 + 0) = r0; \
*(u64*)(r7 + 4) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
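/* After "r7 += -12", r4 = fp-20: the 8-byte "res" slot would start at a
 * 4-byte-aligned address, which the verifier rejects as a misaligned
 * stack access. The two stores initialize the full 12 bytes at
 * fp-20..fp-9, so alignment is the only defect here. */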
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG size < sizeof(long)")
__failure __msg("invalid indirect access to stack R4 off=-4 size=8")
__naked void to_long_size_sizeof_long(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -16; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += 12; \
*(u32*)(r7 + 0) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
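/* After "r7 += 12", r4 = fp-4: an 8-byte access there would run past
 * the frame pointer, so only 4 bytes of stack are actually available
 * for the long-sized "res" argument. */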
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG initialized")
__success
__naked void arg_ptr_to_long_initialized(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -8; \
*(u64*)(r7 + 0) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
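/* The passing variant above corresponds to the canonical bpf_strtoul()
 * usage: the buffer holds "600\0" (the bytes of 0x00303036 stored
 * little-endian) and "res" is a fully initialized, 8-byte aligned stack
 * slot. In plain BPF C (sketch, illustration only):
 *
 *	unsigned long res = 0;
 *	char buf[4] = "600";
 *	bpf_strtoul(buf, sizeof(buf), 0, &res);
 */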
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_tp_writable.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("raw_tracepoint.w")
__description("raw_tracepoint_writable: reject variable offset")
__failure
__msg("R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void tracepoint_writable_reject_variable_offset(void)
{
asm volatile (" \
/* r6 is our tp buffer */ \
r6 = *(u64*)(r1 + 0); \
r1 = %[map_hash_8b] ll; \
/* move the key (== 0) to r10-8 */ \
w0 = 0; \
r2 = r10; \
r2 += -8; \
*(u64*)(r2 + 0) = r0; \
/* lookup in the map */ \
call %[bpf_map_lookup_elem]; \
/* exit clean if null */ \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* shift the buffer pointer to a variable location */\
r0 = *(u32*)(r0 + 0); \
r6 += r0; \
/* clobber whatever's there */ \
r7 = 4242; \
*(u64*)(r6 + 0) = r7; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
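/* The store is rejected because r6 (the writable tracepoint buffer) was
 * offset by a value read from map memory: the verifier tracks it as
 * var_off=(0x0; 0xffffffff), i.e. an unbounded offset into the buffer.
 * In plain BPF C the rejected pattern is roughly (sketch, illustration
 * only):
 *
 *	u32 off = *(u32 *)val;            // unknown at verification time
 *	*(u64 *)(buf + off) = 4242;       // variable-offset store, rejected
 */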
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0-only
#include <stdbool.h>
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
bool get_unpriv_disabled(void);