Commit 1651e39e authored by Alexei Starovoitov

Merge branch 'bpf-improvements-and-fixes'

Daniel Borkmann says:

====================
This set contains a small cleanup in cBPF prologue generation and
otherwise fixes an outstanding issue related to BPF to BPF calls
and exception handling. For details please see related patches.
Last but not least, the BPF selftests are extended with several
new test cases.

Thanks!
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents c25ef6a5 21ccaf21
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
@@ -363,15 +363,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 {
 	const u8 *tmp = bpf2a32[TMP_REG_1];
-	s32 jmp_offset;
-
-	/* checks if divisor is zero or not. If it is, then
-	 * exit directly.
-	 */
-	emit(ARM_CMP_I(rn, 0), ctx);
-	_emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx);
-	jmp_offset = epilogue_offset(ctx);
-	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
 #if __LINUX_ARM_ARCH__ == 7
 	if (elf_hwcap & HWCAP_IDIVA) {
 		if (op == BPF_DIV)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
@@ -390,18 +390,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU64 | BPF_DIV | BPF_X:
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-	{
-		const u8 r0 = bpf2a64[BPF_REG_0];
-
-		/* if (src == 0) return 0 */
-		jmp_offset = 3; /* skip ahead to else path */
-		check_imm19(jmp_offset);
-		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
-		emit(A64_MOVZ(1, r0, 0, 0), ctx);
-		jmp_offset = epilogue_offset(ctx);
-		check_imm26(jmp_offset);
-		emit(A64_B(jmp_offset), ctx);
-		/* else */
 		switch (BPF_OP(code)) {
 		case BPF_DIV:
 			emit(A64_UDIV(is64, dst, dst, src), ctx);
@@ -413,7 +401,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 			break;
 		}
 		break;
-	}
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(A64_LSLV(is64, dst, dst, src), ctx);
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
@@ -741,16 +741,11 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
 	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+		if (insn->imm == 0)
+			return -EINVAL;
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
 		if (dst < 0)
 			return dst;
-		if (insn->imm == 0) { /* Div by zero */
-			b_off = b_imm(exit_idx, ctx);
-			if (is_bad_offset(b_off))
-				return -E2BIG;
-			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
-			emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
-		}
 		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
 		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
 			/* sign extend */
@@ -770,19 +765,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
 	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
+		if (insn->imm == 0)
+			return -EINVAL;
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
 		if (dst < 0)
 			return dst;
-		if (insn->imm == 0) { /* Div by zero */
-			b_off = b_imm(exit_idx, ctx);
-			if (is_bad_offset(b_off))
-				return -E2BIG;
-			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
-			emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
-		}
 		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
 			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
 		if (insn->imm == 1) {
 			/* div by 1 is a nop, mod by 1 is zero */
 			if (bpf_op == BPF_MOD)
@@ -860,11 +849,6 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_DIV:
 	case BPF_MOD:
-		b_off = b_imm(exit_idx, ctx);
-		if (is_bad_offset(b_off))
-			return -E2BIG;
-		emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
-		emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
 		emit_instr(ctx, ddivu, dst, src);
 		if (bpf_op == BPF_DIV)
 			emit_instr(ctx, mflo, dst);
@@ -943,11 +927,6 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_DIV:
 	case BPF_MOD:
-		b_off = b_imm(exit_idx, ctx);
-		if (is_bad_offset(b_off))
-			return -E2BIG;
-		emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
-		emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
 		emit_instr(ctx, divu, dst, src);
 		if (bpf_op == BPF_DIV)
 			emit_instr(ctx, mflo, dst);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
@@ -381,10 +381,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		goto bpf_alu32_trunc;
 	case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 	case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
-		PPC_CMPWI(src_reg, 0);
-		PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
-		PPC_LI(b2p[BPF_REG_0], 0);
-		PPC_JMP(exit_addr);
 		if (BPF_OP(code) == BPF_MOD) {
 			PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
 			PPC_MULW(b2p[TMP_REG_1], src_reg,
@@ -395,10 +391,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		goto bpf_alu32_trunc;
 	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
-		PPC_CMPDI(src_reg, 0);
-		PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
-		PPC_LI(b2p[BPF_REG_0], 0);
-		PPC_JMP(exit_addr);
 		if (BPF_OP(code) == BPF_MOD) {
 			PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
 			PPC_MULD(b2p[TMP_REG_1], src_reg,
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
@@ -610,11 +610,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 	{
 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
-		jit->seen |= SEEN_RET0;
-		/* ltr %src,%src (if src == 0 goto fail) */
-		EMIT2(0x1200, src_reg, src_reg);
-		/* jz <ret0> */
-		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 		/* lhi %w0,0 */
 		EMIT4_IMM(0xa7080000, REG_W0, 0);
 		/* lr %w1,%dst */
@@ -630,11 +625,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 	{
 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
-		jit->seen |= SEEN_RET0;
-		/* ltgr %src,%src (if src == 0 goto fail) */
-		EMIT4(0xb9020000, src_reg, src_reg);
-		/* jz <ret0> */
-		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 		/* lghi %w0,0 */
 		EMIT4_IMM(0xa7090000, REG_W0, 0);
 		/* lgr %w1,%dst */
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
@@ -967,31 +967,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit_alu(MULX, src, dst, ctx);
 		break;
 	case BPF_ALU | BPF_DIV | BPF_X:
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_write_y(G0, ctx);
 		emit_alu(DIV, src, dst, ctx);
 		break;
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_alu(UDIVX, src, dst, ctx);
 		break;
 	case BPF_ALU | BPF_MOD | BPF_X: {
 		const u8 tmp = bpf2sparc[TMP_REG_1];
 
 		ctx->tmp_1_used = true;
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_write_y(G0, ctx);
 		emit_alu3(DIV, dst, src, tmp, ctx);
 		emit_alu3(MULX, tmp, src, tmp, ctx);
@@ -1003,10 +989,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		ctx->tmp_1_used = true;
 
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_alu3(UDIVX, dst, src, tmp, ctx);
 		emit_alu3(MULX, tmp, src, tmp, ctx);
 		emit_alu3(SUB, dst, tmp, dst, ctx);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
@@ -568,26 +568,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			 */
 			EMIT2(0x31, 0xd2);
-			if (BPF_SRC(insn->code) == BPF_X) {
-				/* if (src_reg == 0) return 0 */
-
-				/* cmp r11, 0 */
-				EMIT4(0x49, 0x83, 0xFB, 0x00);
-
-				/* jne .+9 (skip over pop, pop, xor and jmp) */
-				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
-				EMIT1(0x5A); /* pop rdx */
-				EMIT1(0x58); /* pop rax */
-				EMIT2(0x31, 0xc0); /* xor eax, eax */
-
-				/* jmp cleanup_addr
-				 * addrs[i] - 11, because there are 11 bytes
-				 * after this insn: div, mov, pop, pop, mov
-				 */
-				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
-				EMIT1_off32(0xE9, jmp_offset);
-			}
 			if (BPF_CLASS(insn->code) == BPF_ALU64)
 				/* div r11 */
 				EMIT3(0x49, 0xF7, 0xF3);
diff --git a/include/linux/filter.h b/include/linux/filter.h
@@ -688,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
+bool bpf_opcode_in_insntable(u8 code);
+
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -4981,6 +4981,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 next_insn:
 			insn++;
 			i++;
+			continue;
+		}
+
+		/* Basic sanity check before we invest more work here. */
+		if (!bpf_opcode_in_insntable(insn->code)) {
+			verbose(env, "unknown opcode %02x\n", insn->code);
+			return -EINVAL;
 		}
 	}
@@ -5064,14 +5071,21 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 	return new_prog;
 }
 
-/* The verifier does more data flow analysis than llvm and will not explore
- * branches that are dead at run time. Malicious programs can have dead code
- * too. Therefore replace all dead at-run-time code with nops.
+/* The verifier does more data flow analysis than llvm and will not
+ * explore branches that are dead at run time. Malicious programs can
+ * have dead code too. Therefore replace all dead at-run-time code
+ * with 'ja -1'.
+ *
+ * Just nops are not optimal, e.g. if they would sit at the end of the
+ * program and through another bug we would manage to jump there, then
+ * we'd execute beyond program memory otherwise. Returning exception
+ * code also wouldn't work since we can have subprogs where the dead
+ * code could be located.
  */
 static void sanitize_dead_code(struct bpf_verifier_env *env)
 {
 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
-	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
 	struct bpf_insn *insn = env->prog->insnsi;
 	const int insn_cnt = env->prog->len;
 	int i;
@@ -5079,7 +5093,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++) {
 		if (aux_data[i].seen)
 			continue;
-		memcpy(insn + i, &nop, sizeof(nop));
+		memcpy(insn + i, &trap, sizeof(trap));
 	}
 }
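
An aside on why 'ja -1' works as a trap (my reading, not text from the patch): BPF resolves jump targets as pc + off + 1, so an unconditional jump with off = -1 branches to pc + (-1) + 1 = pc, i.e. to itself. Should a dead slot ever be reached through some other bug, execution spins in place instead of running past the end of the program or falling into a neighbouring subprog.
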
@@ -5386,15 +5400,37 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 	int i, cnt, delta = 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
-			/* due to JIT bugs clear upper 32-bits of src register
-			 * before div/mod operation
-			 */
-			insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
-			insn_buf[1] = *insn;
-			cnt = 2;
-			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+			struct bpf_insn mask_and_div[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				/* Rx div 0 -> 0 */
+				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				*insn,
+			};
+			struct bpf_insn mask_and_mod[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				/* Rx mod 0 -> Rx */
+				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+				*insn,
+			};
+			struct bpf_insn *patchlet;
+
+			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+				patchlet = mask_and_div + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+			} else {
+				patchlet = mask_and_mod + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+			}
+
+			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
 				return -ENOMEM;
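
To make the patchlet above concrete: for a 32-bit BPF_ALU | BPF_DIV | BPF_X instruction, the verifier now splices in the full mask_and_div sequence in place of the single divide (a sketch; Rd and Rs stand for insn->dst_reg and insn->src_reg):

	BPF_MOV32_REG(Rs, Rs),          /* clear upper 32 bits of src */
	BPF_JMP_IMM(BPF_JNE, Rs, 0, 2), /* src != 0: skip the two fixup insns */
	BPF_ALU32_REG(BPF_XOR, Rd, Rd), /* div by 0: dst = 0 */
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),   /* hop over the real divide */
	*insn,                          /* the original div instruction */

For the 64-bit variants, patchlet = mask_and_div + 1 drops the leading masking mov, since the upper 32 bits of a 64-bit source operand must be preserved.
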
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
@@ -2003,10 +2003,14 @@ static struct bpf_test tests[] = {
 		{ { 4, 0 }, { 5, 10 } }
 	},
 	{
-		"INT: DIV by zero",
+		/* This one doesn't go through verifier, but is just raw insn
+		 * as opposed to cBPF tests from here. Thus div by 0 tests are
+		 * done in test_verifier in BPF kselftests.
+		 */
+		"INT: DIV by -1",
 		.u.insns_int = {
 			BPF_ALU64_REG(BPF_MOV, R6, R1),
-			BPF_ALU64_IMM(BPF_MOV, R7, 0),
+			BPF_ALU64_IMM(BPF_MOV, R7, -1),
 			BPF_LD_ABS(BPF_B, 3),
 			BPF_ALU32_REG(BPF_DIV, R0, R7),
 			BPF_EXIT_INSN(),
diff --git a/net/core/filter.c b/net/core/filter.c
@@ -401,8 +401,8 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
 	/* Classic BPF expects A and X to be reset first. These need
 	 * to be guaranteed to be the first two instructions.
 	 */
-	*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
-	*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
+	*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+	*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
 
 	/* All programs must keep CTX in callee saved BPF_REG_CTX.
 	 * In eBPF case it's done by the compiler, here we need to
@@ -459,8 +459,15 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
 				break;
 
 			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
-			    fp->code == (BPF_ALU | BPF_MOD | BPF_X))
+			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
 				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+				/* Error with exception code on div/mod by 0.
+				 * For cBPF programs, this was always return 0.
+				 */
+				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
+				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+				*insn++ = BPF_EXIT_INSN();
+			}
 
 			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
 			break;
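
Written out, a classic BPF 'div A, X' or 'mod A, X' now converts to the following eBPF sequence (a sketch of what the hunk above emits), keeping the historical cBPF behaviour of returning 0 on division by zero, but as explicit instructions rather than a JIT-side exception path:

	BPF_MOV32_REG(BPF_REG_X, BPF_REG_X),          /* truncate X to 32 bits */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2),        /* X != 0: go divide */
	BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A), /* X == 0: A = 0 */
	BPF_EXIT_INSN(),                              /* return 0, as cBPF always did */
	BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k), /* the div/mod itself */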