Commit b2d9002e authored by Alexei Starovoitov

Merge branch 'Improvements for BPF_ST tracking by verifier '

Eduard Zingerman says:

====================

This patch-set is part of the preparation work for the -mcpu=v4 option of
the BPF C compiler (discussed in [1]). Among other things, -mcpu=v4 should
enable generation of the BPF_ST instruction by the compiler.

- Patches #1,2 adjust the verifier to track values of constants written to
  the stack using BPF_ST. Currently these are tracked imprecisely, unlike
  writes using BPF_STX, e.g.:

    fp[-8] = 42;   currently the verifier assumes that fp[-8]=mmmmmmmm
                   after such an instruction, where m stands for "misc",
                   i.e. merely a note that something was written at fp[-8].

    r1 = 42;       the verifier tracks r1=42 after this instruction.
    fp[-8] = r1;   the verifier tracks fp[-8]=42 after this instruction.

  These patches make both cases equivalent (a short sketch of the
  mechanism is given after this list).

- Patches #3,4 adjust verifier.c:check_stack_write_var_off() to
  preserve STACK_ZERO marks when BPF_ST writes zero. Currently these
  are replaced by STACK_MISC, unlike zero writes using BPF_STX, e.g.:

    ... stack range [X,Y] is marked as STACK_ZERO ...
    r0 = ... variable offset pointer to stack with range [X,Y] ...

    fp[r0] = 0;    currently the verifier marks the range [X,Y] as
                   STACK_MISC for such an instruction.

    r1 = 0;
    fp[r0] = r1;   the verifier keeps the STACK_ZERO marks for range [X,Y].

  These patches make both cases equivalent (see the second sketch after
  this list).
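
For the constant case, the change in check_stack_write_fixed_off() boils
down to synthesizing a known scalar from the instruction immediate and
spilling it as if it had come from a register. A minimal sketch, taken
from the diff below (the explanatory comment is added here and is not
part of the patch):

    } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
               insn->imm != 0 && env->bpf_capable) {
            /* BPF_ST of a constant: spill a synthetic known scalar so that
             * fp[-8] = 42 is tracked as precisely as r1 = 42; fp[-8] = r1.
             */
            struct bpf_reg_state fake_reg = {};

            __mark_reg_known(&fake_reg, (u32)insn->imm);
            fake_reg.type = SCALAR_VALUE;
            save_register_state(state, spi, &fake_reg, size);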
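
For zero writes, the change is to treat a BPF_ST with imm == 0 the same as
a store of a known-zero register when deciding whether STACK_ZERO marks can
be kept. A sketch of the condition used in check_stack_write_var_off() in
the diff below (comment added here for clarity):

    /* BPF_ST of immediate 0 is as good as storing a known-zero register */
    if ((value_reg && register_is_null(value_reg)) ||
        (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
            writing_zero = true;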

A motivating example for patch #1 can be found at [3].

The previous version of the patch-set is at [2]; the changes are:
- Explicit initialization of fake register parent link is removed from
  verifier.c:check_stack_write_fixed_off() as parent links are now
  correctly handled by verifier.c:save_register_state().
- Original patch #1 is split into patches #1 & #3.
- Missing test case added for the patch #3
  verifier.c:check_stack_write_var_off() adjustment.
- Test cases are updated to use .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
  which requires the return value to be in the range [0,1] (the original
  test cases assumed that such a range is always required, which is not
  true).
- Original patch #3, which allowed BPF_ST writes to the context, is
  withheld for now; without compiler support for BPF_ST it requires some
  creative testing.
- Original patch #5 is removed from the patch-set. That patch adjusted
  the expected verifier error messages in some tests (changes to expected
  instruction indices), which only becomes necessary once the C compiler
  generates BPF_ST instructions instead of BPF_STX. These changes are not
  needed yet.

[1] https://lore.kernel.org/bpf/01515302-c37d-2ee5-c950-2f556a4caad0@meta.com/
[2] https://lore.kernel.org/bpf/20221231163122.1360813-1-eddyz87@gmail.com/
[3] https://lore.kernel.org/bpf/f1e4282bf00aa21a72fc5906f8c3be1ae6c94a5e.camel@gmail.com/
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 62d101d5 2a33c5a2
@@ -3473,6 +3473,11 @@ static void save_register_state(struct bpf_func_state *state,
 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
 }
 
+static bool is_bpf_st_mem(struct bpf_insn *insn)
+{
+	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
+}
+
 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
@@ -3484,8 +3489,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 {
 	struct bpf_func_state *cur; /* state of the current function */
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
-	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
+	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
 	struct bpf_reg_state *reg = NULL;
+	u32 dst_reg = insn->dst_reg;
 
 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
 	if (err)
@@ -3538,6 +3544,13 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 				return err;
 		}
 		save_register_state(state, spi, reg, size);
+	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
+		   insn->imm != 0 && env->bpf_capable) {
+		struct bpf_reg_state fake_reg = {};
+
+		__mark_reg_known(&fake_reg, (u32)insn->imm);
+		fake_reg.type = SCALAR_VALUE;
+		save_register_state(state, spi, &fake_reg, size);
 	} else if (reg && is_spillable_regtype(reg->type)) {
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -3572,7 +3585,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
 		/* when we zero initialize stack slots mark them as such */
-		if (reg && register_is_null(reg)) {
+		if ((reg && register_is_null(reg)) ||
+		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
 			/* backtracking doesn't work for STACK_ZERO yet. */
 			err = mark_chain_precision(env, value_regno);
 			if (err)
@@ -3617,6 +3631,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
 	int min_off, max_off;
 	int i, err;
 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
+	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
 	bool writing_zero = false;
 	/* set if the fact that we're writing a zero is used to let any
 	 * stack slots remain STACK_ZERO
@@ -3629,7 +3644,8 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
 	max_off = ptr_reg->smax_value + off + size;
 	if (value_regno >= 0)
 		value_reg = &cur->regs[value_regno];
-	if (value_reg && register_is_null(value_reg))
+	if ((value_reg && register_is_null(value_reg)) ||
+	    (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
 		writing_zero = true;
 
 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
 {
 	"bounds checks mixing signed and unsigned, positive bounds",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, 2),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
@@ -17,20 +18,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
@@ -40,20 +42,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 2",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
@@ -65,20 +68,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 3",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
@@ -89,20 +93,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 4",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, 1),
 	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
@@ -112,19 +117,20 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.result = ACCEPT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 5",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
@@ -135,17 +141,20 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 6",
 	.insns = {
+	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
 	BPF_MOV64_IMM(BPF_REG_2, 0),
 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_6, -1),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
@@ -163,13 +172,14 @@
 {
 	"bounds checks mixing signed and unsigned, variant 7",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
@@ -179,19 +189,20 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.result = ACCEPT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 8",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -203,20 +214,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 9",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -228,19 +240,20 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.result = ACCEPT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 10",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, 0),
 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -252,20 +265,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 11",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -278,20 +292,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 12",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -6),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -303,20 +318,21 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 13",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, 2),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -331,7 +347,7 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
@@ -340,13 +356,14 @@
 	.insns = {
 	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
 		    offsetof(struct __sk_buff, mark)),
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_MOV64_IMM(BPF_REG_8, 2),
@@ -360,20 +377,21 @@
 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
 	BPF_JMP_IMM(BPF_JA, 0, 0, -7),
 	},
-	.fixup_map_hash_8b = { 4 },
+	.fixup_map_hash_8b = { 6 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
 {
 	"bounds checks mixing signed and unsigned, variant 15",
 	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_MOV64_IMM(BPF_REG_2, -6),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -387,7 +405,7 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
 	.errstr = "unbounded min value",
 	.result = REJECT,
 },
{
"BPF_ST_MEM stack imm non-zero",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -42),
/* if value is tracked correctly R0 is zero */
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/* Use prog type that requires return value in range [0, 1] */
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
{
"BPF_ST_MEM stack imm zero",
.insns = {
/* mark stack 0000 0000 */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* read and sum a few bytes */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -8),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* if value is tracked correctly R0 is zero */
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/* Use prog type that requires return value in range [0, 1] */
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
{
"BPF_ST_MEM stack imm zero, variable offset",
.insns = {
/* set fp[-16], fp[-24] to zeros */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
/* r0 = random value in range [-32, -15] */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JLE, BPF_REG_0, 16, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 32),
/* fp[r0] = 0, make a variable offset write of zero,
* this should preserve zero marks on stack.
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_10),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
/* r0 = fp[-20], if variable offset write was tracked correctly
* r0 would be a known zero.
*/
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_10, -20),
/* Would fail return code verification if r0 range is not tracked correctly. */
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/* Use prog type that requires return value in range [0, 1] */
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},