Commit 0f55f9ed authored by Christy Lee's avatar Christy Lee Committed by Alexei Starovoitov

bpf: Only print scratched registers and stack slots to verifier logs.

When printing verifier state for any log level, print full verifier
state only on function calls or on errors. Otherwise, only print the
registers and stack slots that were accessed.

Log size differences:

verif_scale_loop6 before: 234566564
verif_scale_loop6 after: 72143943
69% size reduction

kfree_skb before: 166406
kfree_skb after: 55386
69% size reduction

Before:

156: (61) r0 = *(u32 *)(r1 +0)
157: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=ctx(id=0,off=0,imm=0) R2_w=invP0 R10=fp0 fp-8_w=00000000 fp-16_w=00\
000000 fp-24_w=00000000 fp-32_w=00000000 fp-40_w=00000000 fp-48_w=00000000 fp-56_w=00000000 fp-64_w=00000000 fp-72_w=00000000 fp-80_w=00000\
000 fp-88_w=00000000 fp-96_w=00000000 fp-104_w=00000000 fp-112_w=00000000 fp-120_w=00000000 fp-128_w=00000000 fp-136_w=00000000 fp-144_w=00\
000000 fp-152_w=00000000 fp-160_w=00000000 fp-168_w=00000000 fp-176_w=00000000 fp-184_w=00000000 fp-192_w=00000000 fp-200_w=00000000 fp-208\
_w=00000000 fp-216_w=00000000 fp-224_w=00000000 fp-232_w=00000000 fp-240_w=00000000 fp-248_w=00000000 fp-256_w=00000000 fp-264_w=00000000 f\
p-272_w=00000000 fp-280_w=00000000 fp-288_w=00000000 fp-296_w=00000000 fp-304_w=00000000 fp-312_w=00000000 fp-320_w=00000000 fp-328_w=00000\
000 fp-336_w=00000000 fp-344_w=00000000 fp-352_w=00000000 fp-360_w=00000000 fp-368_w=00000000 fp-376_w=00000000 fp-384_w=00000000 fp-392_w=\
00000000 fp-400_w=00000000 fp-408_w=00000000 fp-416_w=00000000 fp-424_w=00000000 fp-432_w=00000000 fp-440_w=00000000 fp-448_w=00000000
; return skb->len;
157: (95) exit
Func#4 is safe for any args that match its prototype
Validating get_constant() func#5...
158: R1=invP(id=0) R10=fp0
; int get_constant(long val)
158: (bf) r0 = r1
159: R0_w=invP(id=1) R1=invP(id=1) R10=fp0
; return val - 122;
159: (04) w0 += -122
160: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=invP(id=1) R10=fp0
; return val - 122;
160: (95) exit
Func#5 is safe for any args that match its prototype
Validating get_skb_ifindex() func#6...
161: R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R3=invP(id=0) R10=fp0
; int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
161: (bc) w0 = w3
162: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R3=invP(id=0) R10=fp0

After:

156: (61) r0 = *(u32 *)(r1 +0)
157: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=ctx(id=0,off=0,imm=0)
; return skb->len;
157: (95) exit
Func#4 is safe for any args that match its prototype
Validating get_constant() func#5...
158: R1=invP(id=0) R10=fp0
; int get_constant(long val)
158: (bf) r0 = r1
159: R0_w=invP(id=1) R1=invP(id=1)
; return val - 122;
159: (04) w0 += -122
160: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff))
; return val - 122;
160: (95) exit
Func#5 is safe for any args that match its prototype
Validating get_skb_ifindex() func#6...
161: R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R3=invP(id=0) R10=fp0
; int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
161: (bc) w0 = w3
162: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R3=invP(id=0)
Signed-off-by: default avatarChristy Lee <christylee@fb.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
Acked-by: default avatarAndrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211216213358.3374427-2-christylee@fb.com
parent 4658e15d
...@@ -474,6 +474,13 @@ struct bpf_verifier_env { ...@@ -474,6 +474,13 @@ struct bpf_verifier_env {
/* longest register parentage chain walked for liveness marking */ /* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk; u32 longest_mark_read_walk;
bpfptr_t fd_array; bpfptr_t fd_array;
/* bit mask to keep track of whether a register has been accessed
* since the last time the function state was printed
*/
u32 scratched_regs;
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
}; };
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
......
...@@ -609,6 +609,44 @@ static const char *kernel_type_name(const struct btf* btf, u32 id) ...@@ -609,6 +609,44 @@ static const char *kernel_type_name(const struct btf* btf, u32 id)
return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
} }
/* Note that register @regno has been read or written since the last time
 * the function state was printed (see scratched_regs in bpf_verifier_env).
 */
static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs = env->scratched_regs | (1U << regno);
}
/* Note that stack slot @spi has been read or written since the last time
 * the function state was printed.
 *
 * scratched_stack_slots is a u64 bit mask, so the shifted literal must be
 * 64-bit: on 32-bit architectures "unsigned long" is only 32 bits wide,
 * making "1UL << spi" undefined behavior for spi >= 32 and silently losing
 * marks for deep stack slots.  Use 1ULL instead.
 */
static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}
/* Has register @regno been accessed since the last state print?  Tests the
 * corresponding bit in the scratched_regs mask.
 */
static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs & (1U << regno)) != 0;
}
/* Has stack slot @spi been accessed since the last state print?
 *
 * The parameter is a stack-slot index (spi), not a register number; it was
 * previously named "regno", which was misleading and inconsistent with
 * mark_stack_slot_scratched().  Renaming a C parameter is caller-compatible.
 */
static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 spi)
{
	return (env->scratched_stack_slots >> spi) & 1;
}
/* True when any register or any stack slot still carries a scratch mark,
 * i.e. something was accessed since the last time the state was printed.
 */
static bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs != 0 || env->scratched_stack_slots != 0;
}
/* Drop all scratch marks.  Called after print_verifier_state() has emitted
 * the scratched registers/slots, so the next print starts from a clean mask.
 */
static void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0;
	env->scratched_stack_slots = 0;
}
/* Mark every register and every stack slot scratched, so the next
 * print_verifier_state() call prints the entire verifier state.
 *
 * scratched_stack_slots is a u64, so the all-ones literal must be 64-bit:
 * on 32-bit architectures "~0UL" is only 0xffffffff and would leave slots
 * 32..63 unmarked.  Use ~0ULL instead.
 */
static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}
/* The reg state of a pointer or a bounded scalar was saved when /* The reg state of a pointer or a bounded scalar was saved when
* it was spilled to the stack. * it was spilled to the stack.
*/ */
...@@ -624,7 +662,8 @@ static void scrub_spilled_slot(u8 *stype) ...@@ -624,7 +662,8 @@ static void scrub_spilled_slot(u8 *stype)
} }
static void print_verifier_state(struct bpf_verifier_env *env, static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state) const struct bpf_func_state *state,
bool print_all)
{ {
const struct bpf_reg_state *reg; const struct bpf_reg_state *reg;
enum bpf_reg_type t; enum bpf_reg_type t;
...@@ -637,6 +676,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, ...@@ -637,6 +676,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
t = reg->type; t = reg->type;
if (t == NOT_INIT) if (t == NOT_INIT)
continue; continue;
if (!print_all && !reg_scratched(env, i))
continue;
verbose(env, " R%d", i); verbose(env, " R%d", i);
print_liveness(env, reg->live); print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str[t]); verbose(env, "=%s", reg_type_str[t]);
...@@ -726,6 +767,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, ...@@ -726,6 +767,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
types_buf[BPF_REG_SIZE] = 0; types_buf[BPF_REG_SIZE] = 0;
if (!valid) if (!valid)
continue; continue;
if (!print_all && !stack_slot_scratched(env, i))
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live); print_liveness(env, state->stack[i].spilled_ptr.live);
if (is_spilled_reg(&state->stack[i])) { if (is_spilled_reg(&state->stack[i])) {
...@@ -751,6 +794,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, ...@@ -751,6 +794,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
if (state->in_async_callback_fn) if (state->in_async_callback_fn)
verbose(env, " async_cb"); verbose(env, " async_cb");
verbose(env, "\n"); verbose(env, "\n");
mark_verifier_state_clean(env);
} }
/* copy array src of length n * size bytes to dst. dst is reallocated if it's too /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
...@@ -1541,6 +1585,7 @@ static void init_func_state(struct bpf_verifier_env *env, ...@@ -1541,6 +1585,7 @@ static void init_func_state(struct bpf_verifier_env *env,
state->frameno = frameno; state->frameno = frameno;
state->subprogno = subprogno; state->subprogno = subprogno;
init_reg_state(env, state); init_reg_state(env, state);
mark_verifier_state_scratched(env);
} }
/* Similar to push_stack(), but for async callbacks */ /* Similar to push_stack(), but for async callbacks */
...@@ -2228,6 +2273,8 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, ...@@ -2228,6 +2273,8 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
return -EINVAL; return -EINVAL;
} }
mark_reg_scratched(env, regno);
reg = &regs[regno]; reg = &regs[regno];
rw64 = is_reg64(env, insn, regno, reg, t); rw64 = is_reg64(env, insn, regno, reg, t);
if (t == SRC_OP) { if (t == SRC_OP) {
...@@ -2678,7 +2725,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, ...@@ -2678,7 +2725,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
reg->precise = true; reg->precise = true;
} }
if (env->log.level & BPF_LOG_LEVEL) { if (env->log.level & BPF_LOG_LEVEL) {
print_verifier_state(env, func); print_verifier_state(env, func, false);
verbose(env, "parent %s regs=%x stack=%llx marks\n", verbose(env, "parent %s regs=%x stack=%llx marks\n",
new_marks ? "didn't have" : "already had", new_marks ? "didn't have" : "already had",
reg_mask, stack_mask); reg_mask, stack_mask);
...@@ -2837,6 +2884,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, ...@@ -2837,6 +2884,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
env->insn_aux_data[insn_idx].sanitize_stack_spill = true; env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
} }
mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) { !register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) { if (dst_reg != BPF_REG_FP) {
...@@ -2958,6 +3006,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, ...@@ -2958,6 +3006,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
slot = -i - 1; slot = -i - 1;
spi = slot / BPF_REG_SIZE; spi = slot / BPF_REG_SIZE;
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
mark_stack_slot_scratched(env, spi);
if (!env->allow_ptr_leaks if (!env->allow_ptr_leaks
&& *stype != NOT_INIT && *stype != NOT_INIT
...@@ -3376,7 +3425,7 @@ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, ...@@ -3376,7 +3425,7 @@ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
* to make sure our theoretical access will be safe. * to make sure our theoretical access will be safe.
*/ */
if (env->log.level & BPF_LOG_LEVEL) if (env->log.level & BPF_LOG_LEVEL)
print_verifier_state(env, state); print_verifier_state(env, state, false);
/* The minimum value is only important with signed /* The minimum value is only important with signed
* comparisons where we can't assume the floor of a * comparisons where we can't assume the floor of a
...@@ -6011,9 +6060,9 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn ...@@ -6011,9 +6060,9 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
if (env->log.level & BPF_LOG_LEVEL) { if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "caller:\n"); verbose(env, "caller:\n");
print_verifier_state(env, caller); print_verifier_state(env, caller, true);
verbose(env, "callee:\n"); verbose(env, "callee:\n");
print_verifier_state(env, callee); print_verifier_state(env, callee, true);
} }
return 0; return 0;
} }
...@@ -6228,9 +6277,9 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) ...@@ -6228,9 +6277,9 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
*insn_idx = callee->callsite + 1; *insn_idx = callee->callsite + 1;
if (env->log.level & BPF_LOG_LEVEL) { if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "returning from callee:\n"); verbose(env, "returning from callee:\n");
print_verifier_state(env, callee); print_verifier_state(env, callee, true);
verbose(env, "to caller at %d:\n", *insn_idx); verbose(env, "to caller at %d:\n", *insn_idx);
print_verifier_state(env, caller); print_verifier_state(env, caller, true);
} }
/* clear everything in the callee */ /* clear everything in the callee */
free_func_state(callee); free_func_state(callee);
...@@ -8249,12 +8298,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, ...@@ -8249,12 +8298,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* Got here implies adding two SCALAR_VALUEs */ /* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) { if (WARN_ON_ONCE(ptr_reg)) {
print_verifier_state(env, state); print_verifier_state(env, state, true);
verbose(env, "verifier internal error: unexpected ptr_reg\n"); verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL; return -EINVAL;
} }
if (WARN_ON(!src_reg)) { if (WARN_ON(!src_reg)) {
print_verifier_state(env, state); print_verifier_state(env, state, true);
verbose(env, "verifier internal error: no src_reg\n"); verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL; return -EINVAL;
} }
...@@ -9389,7 +9438,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, ...@@ -9389,7 +9438,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EACCES; return -EACCES;
} }
if (env->log.level & BPF_LOG_LEVEL) if (env->log.level & BPF_LOG_LEVEL)
print_verifier_state(env, this_branch->frame[this_branch->curframe]); print_verifier_state(env, this_branch->frame[this_branch->curframe], false);
return 0; return 0;
} }
...@@ -11259,14 +11308,17 @@ static int do_check(struct bpf_verifier_env *env) ...@@ -11259,14 +11308,17 @@ static int do_check(struct bpf_verifier_env *env)
if (env->log.level & BPF_LOG_LEVEL2 || if (env->log.level & BPF_LOG_LEVEL2 ||
(env->log.level & BPF_LOG_LEVEL && do_print_state)) { (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
if (env->log.level & BPF_LOG_LEVEL2) if (env->log.level & BPF_LOG_LEVEL2) {
verbose(env, "%d:", env->insn_idx); if (verifier_state_scratched(env))
else verbose(env, "%d:", env->insn_idx);
} else {
verbose(env, "\nfrom %d to %d%s:", verbose(env, "\nfrom %d to %d%s:",
env->prev_insn_idx, env->insn_idx, env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ? env->cur_state->speculative ?
" (speculative execution)" : ""); " (speculative execution)" : "");
print_verifier_state(env, state->frame[state->curframe]); }
print_verifier_state(env, state->frame[state->curframe],
false);
do_print_state = false; do_print_state = false;
} }
...@@ -11488,6 +11540,7 @@ static int do_check(struct bpf_verifier_env *env) ...@@ -11488,6 +11540,7 @@ static int do_check(struct bpf_verifier_env *env)
if (err) if (err)
return err; return err;
process_bpf_exit: process_bpf_exit:
mark_verifier_state_scratched(env);
update_branch_counts(env, env->cur_state); update_branch_counts(env, env->cur_state);
err = pop_stack(env, &prev_insn_idx, err = pop_stack(env, &prev_insn_idx,
&env->insn_idx, pop_log); &env->insn_idx, pop_log);
...@@ -14148,6 +14201,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) ...@@ -14148,6 +14201,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
} }
} }
mark_verifier_state_clean(env);
if (IS_ERR(btf_vmlinux)) { if (IS_ERR(btf_vmlinux)) {
/* Either gcc or pahole or kernel are broken. */ /* Either gcc or pahole or kernel are broken. */
verbose(env, "in-kernel BTF is malformed\n"); verbose(env, "in-kernel BTF is malformed\n");
......
...@@ -39,8 +39,8 @@ static struct bpf_align_test tests[] = { ...@@ -39,8 +39,8 @@ static struct bpf_align_test tests[] = {
}, },
.prog_type = BPF_PROG_TYPE_SCHED_CLS, .prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = { .matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"}, {0, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"}, {0, "R10=fp0"},
{1, "R3_w=inv2"}, {1, "R3_w=inv2"},
{2, "R3_w=inv4"}, {2, "R3_w=inv4"},
{3, "R3_w=inv8"}, {3, "R3_w=inv8"},
...@@ -67,8 +67,8 @@ static struct bpf_align_test tests[] = { ...@@ -67,8 +67,8 @@ static struct bpf_align_test tests[] = {
}, },
.prog_type = BPF_PROG_TYPE_SCHED_CLS, .prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = { .matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"}, {0, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"}, {0, "R10=fp0"},
{1, "R3_w=inv1"}, {1, "R3_w=inv1"},
{2, "R3_w=inv2"}, {2, "R3_w=inv2"},
{3, "R3_w=inv4"}, {3, "R3_w=inv4"},
...@@ -96,8 +96,8 @@ static struct bpf_align_test tests[] = { ...@@ -96,8 +96,8 @@ static struct bpf_align_test tests[] = {
}, },
.prog_type = BPF_PROG_TYPE_SCHED_CLS, .prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = { .matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"}, {0, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"}, {0, "R10=fp0"},
{1, "R3_w=inv4"}, {1, "R3_w=inv4"},
{2, "R3_w=inv8"}, {2, "R3_w=inv8"},
{3, "R3_w=inv10"}, {3, "R3_w=inv10"},
...@@ -118,8 +118,8 @@ static struct bpf_align_test tests[] = { ...@@ -118,8 +118,8 @@ static struct bpf_align_test tests[] = {
}, },
.prog_type = BPF_PROG_TYPE_SCHED_CLS, .prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = { .matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"}, {0, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"}, {0, "R10=fp0"},
{1, "R3_w=inv7"}, {1, "R3_w=inv7"},
{2, "R3_w=inv7"}, {2, "R3_w=inv7"},
{3, "R3_w=inv14"}, {3, "R3_w=inv14"},
...@@ -161,13 +161,13 @@ static struct bpf_align_test tests[] = { ...@@ -161,13 +161,13 @@ static struct bpf_align_test tests[] = {
}, },
.prog_type = BPF_PROG_TYPE_SCHED_CLS, .prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = { .matches = {
{7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"}, {6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
{18, "R3=pkt_end(id=0,off=0,imm=0)"}, {13, "R3_w=pkt_end(id=0,off=0,imm=0)"},
{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
...@@ -234,10 +234,10 @@ static struct bpf_align_test tests[] = { ...@@ -234,10 +234,10 @@ static struct bpf_align_test tests[] = {
}, },
.prog_type = BPF_PROG_TYPE_SCHED_CLS, .prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = { .matches = {
{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"}, {3, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"}, {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"}, {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
{10, "R2=pkt(id=0,off=0,r=18,imm=0)"}, {9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
{10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, {10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
...@@ -296,7 +296,7 @@ static struct bpf_align_test tests[] = { ...@@ -296,7 +296,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known /* Calculated offset in R6 has unknown value, but known
* alignment of 4. * alignment of 4.
*/ */
{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in /* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6. * known fixed offset, and variable offset from R6.
...@@ -386,7 +386,7 @@ static struct bpf_align_test tests[] = { ...@@ -386,7 +386,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known /* Calculated offset in R6 has unknown value, but known
* alignment of 4. * alignment of 4.
*/ */
{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */ /* Adding 14 makes R6 be (4n+2) */
{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
...@@ -458,7 +458,7 @@ static struct bpf_align_test tests[] = { ...@@ -458,7 +458,7 @@ static struct bpf_align_test tests[] = {
/* Checked s>=0 */ /* Checked s>=0 */
{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */ /* packet pointer + nonnegative (4n+2) */
{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, {12, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able * We checked the bounds, but it might have been able
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment