Commit 4cd58e9a authored by Yonghong Song, committed by Alexei Starovoitov

bpf: Support new 32bit offset jmp instruction

Add interpreter/jit/verifier support for 32bit offset jmp instruction.
If a conditional jmp instruction needs more than a 16-bit offset,
it can be simulated with a conditional jmp plus a 32-bit jmp insn.
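As an illustration of that simulation, here is a minimal, hypothetical sketch (not taken from this commit; the register, condition and offsets are made up) of the two-instruction sequence an encoder could emit, using struct bpf_insn from the UAPI header. The new BPF_JMP32 | BPF_JA insn carries its target offset in imm, so it is not limited by the 16-bit off field:

#include <linux/bpf.h>

/* Hypothetical far conditional branch "if r1 == 7 goto <far target>":
 * invert the condition so the short jump only has to skip one insn,
 * then let the 32-bit jump reach the real target via imm. A real
 * rewriter must of course recompute all affected offsets after
 * inserting the extra instruction.
 */
static const struct bpf_insn far_cond_jmp[] = {
	/* if r1 != 7, skip the long jump (off = 1 fits easily in s16) */
	{ .code = BPF_JMP | BPF_JNE | BPF_K, .dst_reg = BPF_REG_1,
	  .off = 1, .imm = 7 },
	/* unconditional jump; 32-bit target offset carried in imm */
	{ .code = BPF_JMP32 | BPF_JA, .imm = 200000 },
};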
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230728011231.3716103-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7058e3a3
arch/x86/net/bpf_jit_comp.c
@@ -1815,16 +1815,24 @@ st:			if (is_imm8(insn->off))
 			break;
 
 		case BPF_JMP | BPF_JA:
-			if (insn->off == -1)
-				/* -1 jmp instructions will always jump
-				 * backwards two bytes. Explicitly handling
-				 * this case avoids wasting too many passes
-				 * when there are long sequences of replaced
-				 * dead code.
-				 */
-				jmp_offset = -2;
-			else
-				jmp_offset = addrs[i + insn->off] - addrs[i];
+		case BPF_JMP32 | BPF_JA:
+			if (BPF_CLASS(insn->code) == BPF_JMP) {
+				if (insn->off == -1)
+					/* -1 jmp instructions will always jump
+					 * backwards two bytes. Explicitly handling
+					 * this case avoids wasting too many passes
+					 * when there are long sequences of replaced
+					 * dead code.
+					 */
+					jmp_offset = -2;
+				else
+					jmp_offset = addrs[i + insn->off] - addrs[i];
+			} else {
+				if (insn->imm == -1)
+					jmp_offset = -2;
+				else
+					jmp_offset = addrs[i + insn->imm] - addrs[i];
+			}
 
 			if (!jmp_offset) {
 				/*
kernel/bpf/core.c
@@ -373,7 +373,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 {
 	const s32 off_min = S16_MIN, off_max = S16_MAX;
 	s32 delta = end_new - end_old;
-	s32 off = insn->off;
+	s32 off;
+
+	if (insn->code == (BPF_JMP32 | BPF_JA))
+		off = insn->imm;
+	else
+		off = insn->off;
 
 	if (curr < pos && curr + off + 1 >= end_old)
 		off += delta;
@@ -381,8 +386,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 		off -= delta;
 	if (off < off_min || off > off_max)
 		return -ERANGE;
-	if (!probe_pass)
-		insn->off = off;
+	if (!probe_pass) {
+		if (insn->code == (BPF_JMP32 | BPF_JA))
+			insn->imm = off;
+		else
+			insn->off = off;
+	}
 
 	return 0;
 }
@@ -1593,6 +1602,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 	INSN_3(JMP, JSLE, K),			\
 	INSN_3(JMP, JSET, K),			\
 	INSN_2(JMP, JA),			\
+	INSN_2(JMP32, JA),			\
 	/* Store instructions. */		\
 	/*   Register based. */			\
 	INSN_3(STX, MEM, B),			\
@@ -1989,6 +1999,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 	JMP_JA:
 		insn += insn->off;
 		CONT;
+	JMP32_JA:
+		insn += insn->imm;
+		CONT;
 	JMP_EXIT:
 		return BPF_R0;
 	/* JMP */
kernel/bpf/verifier.c
@@ -2855,7 +2855,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
-		off = i + insn[i].off + 1;
+		if (code == (BPF_JMP32 | BPF_JA))
+			off = i + insn[i].imm + 1;
+		else
+			off = i + insn[i].off + 1;
 		if (off < subprog_start || off >= subprog_end) {
 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
 			return -EINVAL;
@@ -2867,6 +2870,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 		 * or unconditional jump back
 		 */
 		if (code != (BPF_JMP | BPF_EXIT) &&
+		    code != (BPF_JMP32 | BPF_JA) &&
 		    code != (BPF_JMP | BPF_JA)) {
 			verbose(env, "last insn is not an exit or jmp\n");
 			return -EINVAL;
@@ -14792,7 +14796,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
 	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-	int ret;
+	int ret, off;
 
 	if (bpf_pseudo_func(insn))
 		return visit_func_call_insn(t, insns, env, true);
@@ -14840,14 +14844,19 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 		if (BPF_SRC(insn->code) != BPF_K)
 			return -EINVAL;
 
+		if (BPF_CLASS(insn->code) == BPF_JMP)
+			off = insn->off;
+		else
+			off = insn->imm;
+
 		/* unconditional jump with single edge */
-		ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
+		ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
 				true);
 		if (ret)
 			return ret;
 
-		mark_prune_point(env, t + insn->off + 1);
-		mark_jmp_point(env, t + insn->off + 1);
+		mark_prune_point(env, t + off + 1);
+		mark_jmp_point(env, t + off + 1);
 
 		return ret;
@@ -16643,15 +16652,18 @@ static int do_check(struct bpf_verifier_env *env)
 				mark_reg_scratched(env, BPF_REG_0);
 			} else if (opcode == BPF_JA) {
 				if (BPF_SRC(insn->code) != BPF_K ||
-				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
 				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    (class == BPF_JMP && insn->imm != 0) ||
+				    (class == BPF_JMP32 && insn->off != 0)) {
 					verbose(env, "BPF_JA uses reserved fields\n");
 					return -EINVAL;
 				}
 
-				env->insn_idx += insn->off + 1;
+				if (class == BPF_JMP)
+					env->insn_idx += insn->off + 1;
+				else
+					env->insn_idx += insn->imm + 1;
 				continue;
 
 			} else if (opcode == BPF_EXIT) {
@@ -17498,13 +17510,13 @@ static bool insn_is_cond_jump(u8 code)
 {
 	u8 op;
 
+	op = BPF_OP(code);
 	if (BPF_CLASS(code) == BPF_JMP32)
-		return true;
+		return op != BPF_JA;
 
 	if (BPF_CLASS(code) != BPF_JMP)
 		return false;
 
-	op = BPF_OP(code);
 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
 }