Commit 011832b9 authored by Alexei Starovoitov, committed by Andrii Nakryiko

bpf: Introduce may_goto instruction

Introduce a may_goto instruction that, from the verifier's point of view, is
similar to the open coded iterators bpf_for()/bpf_repeat() and the bpf_loop()
helper, but it doesn't iterate any objects.
In assembly 'may_goto' is a nop most of the time, until the bpf runtime has to
terminate the program for whatever reason. In the current implementation
may_goto has a hidden counter, but other mechanisms can be used.
For programs written in C a later patch introduces a 'cond_break' macro
that combines 'may_goto' with a 'break' statement and has similar semantics:
cond_break is a nop until the bpf runtime has to break out of the loop.
It can be used in any normal "for" or "while" loop, like

  for (i = zero; i < cnt; cond_break, i++) {
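
A minimal sketch of how such a macro could look, using asm goto to emit the
raw opcode (0xe5 is BPF_JMP | BPF_JCOND with src_reg == BPF_MAY_GOTO in the
regs byte); the real macro lands in the later patch, so treat the exact byte
layout below as illustrative:

  /* Illustrative cond_break-style macro: emit a raw may_goto insn whose
   * 16-bit off field points at a label that executes 'break'.
   */
  #define cond_break					\
  	({ __label__ l_break, l_continue;		\
  	   asm volatile goto("1:.byte 0xe5;		\
  			     .byte 0;			\
  			     .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
  			     .short 0"			\
  			     :::: l_break);		\
  	   goto l_continue;				\
  	l_break: break;					\
  	l_continue:;					\
  	})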

The verifier recognizes that may_goto is used in the program, reserves an
additional 8 bytes of stack, initializes them in the subprog prologue, and
replaces each may_goto instruction with:
aux_reg = *(u64 *)(fp - 40)
if aux_reg == 0 goto pc+off
aux_reg -= 1
*(u64 *)(fp - 40) = aux_reg
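
Inside the verifier this expansion can be sketched with the instruction
macros from include/linux/filter.h; the stack offset and jump target below
are placeholders, not the exact values the verifier computes per subprog:

  /* Sketch: aux_reg is the verifier's scratch register BPF_REG_AX and
   * stack_off is where the hidden counter was reserved (e.g. fp - 40).
   */
  struct bpf_insn insn_buf[] = {
  	BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off),
  	BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2),
  	BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1),
  	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off),
  };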

The may_goto instruction can be used by LLVM to implement __builtin_memcpy
and __builtin_strcmp.

may_goto is not a full substitute for the bpf_for() macro.
bpf_for() doesn't have an induction variable that the verifier sees,
so 'i' in bpf_for(i, 0, 100) is seen as imprecise and bounded.

But when the code is written as:

  for (i = 0; i < 100; cond_break, i++)

the verifier sees 'i' as a precise constant zero,
hence cond_break (aka may_goto) doesn't help the loop to converge.
A static or global variable can be used as a workaround:

  static int zero = 0;
  for (i = zero; i < 100; cond_break, i++) // works!
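
For illustration, a complete (hypothetical) loop using the workaround;
'sum_array', 'arr' and the fixed bound of 100 are assumptions of this sketch:

  static int zero = 0;

  /* Starting 'i' from the global 'zero' keeps it imprecise, so the
   * verifier converges instead of unrolling 100 precise states.
   */
  int sum_array(int *arr)
  {
  	int i, sum = 0;

  	for (i = zero; i < 100; cond_break, i++)
  		sum += arr[i];
  	return sum;
  }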

may_goto works well with arena pointers that don't need to be bounds
checked on access. A load/store from the arena returns an imprecise unbounded
scalar, and loops with may_goto pass the verifier.

Reserve a new opcode BPF_JMP | BPF_JCOND for the may_goto insn.
JCOND stands for conditional pseudo jump.
Since a goto_or_nop insn was also proposed, it may use the same opcode.
may_goto vs goto_or_nop can be distinguished by src_reg:
code = BPF_JMP | BPF_JCOND
src_reg = 0 - may_goto
src_reg = 1 - goto_or_nop
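
For reference, such an insn can be assembled with the generic BPF_RAW_INSN()
macro from include/linux/filter.h; the jump offset of 3 is an arbitrary
example:

  /* may_goto that, once the hidden counter hits zero, jumps 3 insns
   * ahead; dst_reg and imm are unused and must be zero.
   */
  struct bpf_insn mg = BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0 /* dst_reg */,
  				    BPF_MAY_GOTO /* src_reg */,
  				    3 /* off */, 0 /* imm */);
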
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Tested-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20240306031929.42666-2-alexei.starovoitov@gmail.com
parent 9a9d1d36
include/linux/bpf_verifier.h
@@ -449,6 +449,7 @@ struct bpf_verifier_state {
 	u32 jmp_history_cnt;
 	u32 dfs_depth;
 	u32 callback_unroll_depth;
+	u32 may_goto_depth;
 };

 #define bpf_get_spilled_reg(slot, frame, mask) \
@@ -619,6 +620,7 @@ struct bpf_subprog_info {
 	u32 start; /* insn idx of function entry point */
 	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
 	u16 stack_depth; /* max. stack depth used by this function */
+	u16 stack_extra;
 	bool has_tail_call: 1;
 	bool tail_call_reachable: 1;
 	bool has_ld_abs: 1;
...
include/uapi/linux/bpf.h
@@ -42,6 +42,7 @@
 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
+#define BPF_JCOND	0xe0	/* conditional pseudo jumps: may_goto, goto_or_nop */
 #define BPF_CALL	0x80	/* function call */
 #define BPF_EXIT	0x90	/* function return */
@@ -50,6 +51,10 @@
 #define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
 #define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */

+enum bpf_cond_pseudo_jmp {
+	BPF_MAY_GOTO = 0,
+};
+
 /* Register numbers */
 enum {
 	BPF_REG_0 = 0,
...
kernel/bpf/core.c
@@ -1675,6 +1675,7 @@ bool bpf_opcode_in_insntable(u8 code)
 	[BPF_LD | BPF_IND | BPF_B] = true,
 	[BPF_LD | BPF_IND | BPF_H] = true,
 	[BPF_LD | BPF_IND | BPF_W] = true,
+	[BPF_JMP | BPF_JCOND] = true,
 };
 #undef BPF_INSN_3_TBL
 #undef BPF_INSN_2_TBL
...
kernel/bpf/disasm.c
@@ -322,6 +322,10 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
 	} else if (insn->code == (BPF_JMP | BPF_JA)) {
 		verbose(cbs->private_data, "(%02x) goto pc%+d\n",
 			insn->code, insn->off);
+	} else if (insn->code == (BPF_JMP | BPF_JCOND) &&
+		   insn->src_reg == BPF_MAY_GOTO) {
+		verbose(cbs->private_data, "(%02x) may_goto pc%+d\n",
+			insn->code, insn->off);
 	} else if (insn->code == (BPF_JMP32 | BPF_JA)) {
 		verbose(cbs->private_data, "(%02x) gotol pc%+d\n",
 			insn->code, insn->imm);
...
kernel/bpf/verifier.c (large diff collapsed in this view)
tools/include/uapi/linux/bpf.h
@@ -42,6 +42,7 @@
 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
+#define BPF_JCOND	0xe0	/* conditional pseudo jumps: may_goto, goto_or_nop */
 #define BPF_CALL	0x80	/* function call */
 #define BPF_EXIT	0x90	/* function return */
@@ -50,6 +51,10 @@
 #define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
 #define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */

+enum bpf_cond_pseudo_jmp {
+	BPF_MAY_GOTO = 0,
+};
+
 /* Register numbers */
 enum {
 	BPF_REG_0 = 0,
...