Commit d1ef551a authored by David S. Miller's avatar David S. Miller

Merge branch 'bpf-mips-jit-improvements'

David Daney says:

====================
MIPS,bpf: Improvements for MIPS eBPF JIT

Here are several improvements and bug fixes for the MIPS eBPF JIT.

The main change is the addition of support for JLT, JLE, JSLT and JSLE
ops, that were recently added.

Also fix a WARN splat seen when running with a preemptible kernel, and a
small cleanup/optimization in the use of BPF_OP(insn->code).

I suggest that the whole thing go via the BPF/net-next path as there
are dependencies on code that is not yet merged to Linus' tree.

Still pending are changes to reduce stack usage when the verifier can
determine the maximum stack size.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents da6817eb 6035b3fa
...@@ -113,6 +113,7 @@ struct jit_ctx { ...@@ -113,6 +113,7 @@ struct jit_ctx {
u64 *reg_val_types; u64 *reg_val_types;
unsigned int long_b_conversion:1; unsigned int long_b_conversion:1;
unsigned int gen_b_offsets:1; unsigned int gen_b_offsets:1;
unsigned int use_bbit_insns:1;
}; };
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
...@@ -655,19 +656,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) ...@@ -655,19 +656,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
return build_int_epilogue(ctx, MIPS_R_T9); return build_int_epilogue(ctx, MIPS_R_T9);
} }
/*
 * Report whether the JIT may emit the Octeon-specific bbit
 * (branch-on-bit) instructions: available only on the Cavium
 * Octeon CPU family.
 */
static bool use_bbit_insns(void)
{
	int cpu = current_cpu_type();

	return cpu == CPU_CAVIUM_OCTEON ||
	       cpu == CPU_CAVIUM_OCTEON_PLUS ||
	       cpu == CPU_CAVIUM_OCTEON2 ||
	       cpu == CPU_CAVIUM_OCTEON3;
}
static bool is_bad_offset(int b_off) static bool is_bad_offset(int b_off)
{ {
return b_off > 0x1ffff || b_off < -0x20000; return b_off > 0x1ffff || b_off < -0x20000;
...@@ -682,6 +670,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -682,6 +670,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
unsigned int target; unsigned int target;
u64 t64; u64 t64;
s64 t64s; s64 t64s;
int bpf_op = BPF_OP(insn->code);
switch (insn->code) { switch (insn->code) {
case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
...@@ -770,13 +759,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -770,13 +759,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, sll, dst, dst, 0); emit_instr(ctx, sll, dst, dst, 0);
if (insn->imm == 1) { if (insn->imm == 1) {
/* div by 1 is a nop, mod by 1 is zero */ /* div by 1 is a nop, mod by 1 is zero */
if (BPF_OP(insn->code) == BPF_MOD) if (bpf_op == BPF_MOD)
emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
break; break;
} }
gen_imm_to_reg(insn, MIPS_R_AT, ctx); gen_imm_to_reg(insn, MIPS_R_AT, ctx);
emit_instr(ctx, divu, dst, MIPS_R_AT); emit_instr(ctx, divu, dst, MIPS_R_AT);
if (BPF_OP(insn->code) == BPF_DIV) if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst); emit_instr(ctx, mflo, dst);
else else
emit_instr(ctx, mfhi, dst); emit_instr(ctx, mfhi, dst);
...@@ -798,13 +787,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -798,13 +787,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (insn->imm == 1) { if (insn->imm == 1) {
/* div by 1 is a nop, mod by 1 is zero */ /* div by 1 is a nop, mod by 1 is zero */
if (BPF_OP(insn->code) == BPF_MOD) if (bpf_op == BPF_MOD)
emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
break; break;
} }
gen_imm_to_reg(insn, MIPS_R_AT, ctx); gen_imm_to_reg(insn, MIPS_R_AT, ctx);
emit_instr(ctx, ddivu, dst, MIPS_R_AT); emit_instr(ctx, ddivu, dst, MIPS_R_AT);
if (BPF_OP(insn->code) == BPF_DIV) if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst); emit_instr(ctx, mflo, dst);
else else
emit_instr(ctx, mfhi, dst); emit_instr(ctx, mfhi, dst);
...@@ -829,7 +818,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -829,7 +818,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
did_move = false; did_move = false;
if (insn->src_reg == BPF_REG_10) { if (insn->src_reg == BPF_REG_10) {
if (BPF_OP(insn->code) == BPF_MOV) { if (bpf_op == BPF_MOV) {
emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
did_move = true; did_move = true;
} else { } else {
...@@ -839,7 +828,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -839,7 +828,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
int tmp_reg = MIPS_R_AT; int tmp_reg = MIPS_R_AT;
if (BPF_OP(insn->code) == BPF_MOV) { if (bpf_op == BPF_MOV) {
tmp_reg = dst; tmp_reg = dst;
did_move = true; did_move = true;
} }
...@@ -847,7 +836,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -847,7 +836,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
src = MIPS_R_AT; src = MIPS_R_AT;
} }
switch (BPF_OP(insn->code)) { switch (bpf_op) {
case BPF_MOV: case BPF_MOV:
if (!did_move) if (!did_move)
emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
...@@ -879,7 +868,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -879,7 +868,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
emit_instr(ctx, ddivu, dst, src); emit_instr(ctx, ddivu, dst, src);
if (BPF_OP(insn->code) == BPF_DIV) if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst); emit_instr(ctx, mflo, dst);
else else
emit_instr(ctx, mfhi, dst); emit_instr(ctx, mfhi, dst);
...@@ -923,7 +912,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -923,7 +912,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
int tmp_reg = MIPS_R_AT; int tmp_reg = MIPS_R_AT;
if (BPF_OP(insn->code) == BPF_MOV) { if (bpf_op == BPF_MOV) {
tmp_reg = dst; tmp_reg = dst;
did_move = true; did_move = true;
} }
...@@ -931,7 +920,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -931,7 +920,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, sll, tmp_reg, src, 0); emit_instr(ctx, sll, tmp_reg, src, 0);
src = MIPS_R_AT; src = MIPS_R_AT;
} }
switch (BPF_OP(insn->code)) { switch (bpf_op) {
case BPF_MOV: case BPF_MOV:
if (!did_move) if (!did_move)
emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
...@@ -962,7 +951,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -962,7 +951,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
emit_instr(ctx, divu, dst, src); emit_instr(ctx, divu, dst, src);
if (BPF_OP(insn->code) == BPF_DIV) if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst); emit_instr(ctx, mflo, dst);
else else
emit_instr(ctx, mfhi, dst); emit_instr(ctx, mfhi, dst);
...@@ -989,7 +978,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -989,7 +978,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break; break;
case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); cmp_eq = (bpf_op == BPF_JEQ);
dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
if (dst < 0) if (dst < 0)
return dst; return dst;
...@@ -1002,8 +991,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -1002,8 +991,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
goto jeq_common; goto jeq_common;
case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP | BPF_JSET | BPF_X:
...@@ -1020,33 +1013,39 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -1020,33 +1013,39 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, sll, MIPS_R_AT, dst, 0); emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
dst = MIPS_R_AT; dst = MIPS_R_AT;
} }
if (BPF_OP(insn->code) == BPF_JSET) { if (bpf_op == BPF_JSET) {
emit_instr(ctx, and, MIPS_R_AT, dst, src); emit_instr(ctx, and, MIPS_R_AT, dst, src);
cmp_eq = false; cmp_eq = false;
dst = MIPS_R_AT; dst = MIPS_R_AT;
src = MIPS_R_ZERO; src = MIPS_R_ZERO;
} else if (BPF_OP(insn->code) == BPF_JSGT) { } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
b_off = b_imm(exit_idx, ctx); b_off = b_imm(exit_idx, ctx);
if (is_bad_offset(b_off)) if (is_bad_offset(b_off))
return -E2BIG; return -E2BIG;
if (bpf_op == BPF_JSGT)
emit_instr(ctx, blez, MIPS_R_AT, b_off); emit_instr(ctx, blez, MIPS_R_AT, b_off);
else
emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
emit_instr(ctx, nop); emit_instr(ctx, nop);
return 2; /* We consumed the exit. */ return 2; /* We consumed the exit. */
} }
b_off = b_imm(this_idx + insn->off + 1, ctx); b_off = b_imm(this_idx + insn->off + 1, ctx);
if (is_bad_offset(b_off)) if (is_bad_offset(b_off))
return -E2BIG; return -E2BIG;
if (bpf_op == BPF_JSGT)
emit_instr(ctx, bgtz, MIPS_R_AT, b_off); emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
else
emit_instr(ctx, blez, MIPS_R_AT, b_off);
emit_instr(ctx, nop); emit_instr(ctx, nop);
break; break;
} else if (BPF_OP(insn->code) == BPF_JSGE) { } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
emit_instr(ctx, slt, MIPS_R_AT, dst, src); emit_instr(ctx, slt, MIPS_R_AT, dst, src);
cmp_eq = true; cmp_eq = bpf_op == BPF_JSGE;
dst = MIPS_R_AT; dst = MIPS_R_AT;
src = MIPS_R_ZERO; src = MIPS_R_ZERO;
} else if (BPF_OP(insn->code) == BPF_JGT) { } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
/* dst or src could be AT */ /* dst or src could be AT */
emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
emit_instr(ctx, sltu, MIPS_R_AT, dst, src); emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
...@@ -1054,16 +1053,16 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -1054,16 +1053,16 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
cmp_eq = true; cmp_eq = bpf_op == BPF_JGT;
dst = MIPS_R_AT; dst = MIPS_R_AT;
src = MIPS_R_ZERO; src = MIPS_R_ZERO;
} else if (BPF_OP(insn->code) == BPF_JGE) { } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
emit_instr(ctx, sltu, MIPS_R_AT, dst, src); emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
cmp_eq = true; cmp_eq = bpf_op == BPF_JGE;
dst = MIPS_R_AT; dst = MIPS_R_AT;
src = MIPS_R_ZERO; src = MIPS_R_ZERO;
} else { /* JNE/JEQ case */ } else { /* JNE/JEQ case */
cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); cmp_eq = (bpf_op == BPF_JEQ);
} }
jeq_common: jeq_common:
/* /*
...@@ -1122,7 +1121,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -1122,7 +1121,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break; break;
case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
cmp_eq = (bpf_op == BPF_JSGE);
dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
if (dst < 0) if (dst < 0)
return dst; return dst;
...@@ -1132,65 +1133,92 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -1132,65 +1133,92 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
b_off = b_imm(exit_idx, ctx); b_off = b_imm(exit_idx, ctx);
if (is_bad_offset(b_off)) if (is_bad_offset(b_off))
return -E2BIG; return -E2BIG;
if (cmp_eq) switch (bpf_op) {
emit_instr(ctx, bltz, dst, b_off); case BPF_JSGT:
else
emit_instr(ctx, blez, dst, b_off); emit_instr(ctx, blez, dst, b_off);
break;
case BPF_JSGE:
emit_instr(ctx, bltz, dst, b_off);
break;
case BPF_JSLT:
emit_instr(ctx, bgez, dst, b_off);
break;
case BPF_JSLE:
emit_instr(ctx, bgtz, dst, b_off);
break;
}
emit_instr(ctx, nop); emit_instr(ctx, nop);
return 2; /* We consumed the exit. */ return 2; /* We consumed the exit. */
} }
b_off = b_imm(this_idx + insn->off + 1, ctx); b_off = b_imm(this_idx + insn->off + 1, ctx);
if (is_bad_offset(b_off)) if (is_bad_offset(b_off))
return -E2BIG; return -E2BIG;
if (cmp_eq) switch (bpf_op) {
emit_instr(ctx, bgez, dst, b_off); case BPF_JSGT:
else
emit_instr(ctx, bgtz, dst, b_off); emit_instr(ctx, bgtz, dst, b_off);
break;
case BPF_JSGE:
emit_instr(ctx, bgez, dst, b_off);
break;
case BPF_JSLT:
emit_instr(ctx, bltz, dst, b_off);
break;
case BPF_JSLE:
emit_instr(ctx, blez, dst, b_off);
break;
}
emit_instr(ctx, nop); emit_instr(ctx, nop);
break; break;
} }
/* /*
* only "LT" compare available, so we must use imm + 1 * only "LT" compare available, so we must use imm + 1
* to generate "GT" * to generate "GT" and imm -1 to generate LE
*/ */
t64s = insn->imm + (cmp_eq ? 0 : 1); if (bpf_op == BPF_JSGT)
t64s = insn->imm + 1;
else if (bpf_op == BPF_JSLE)
t64s = insn->imm + 1;
else
t64s = insn->imm;
cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
if (t64s >= S16_MIN && t64s <= S16_MAX) { if (t64s >= S16_MIN && t64s <= S16_MAX) {
emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
src = MIPS_R_AT; src = MIPS_R_AT;
dst = MIPS_R_ZERO; dst = MIPS_R_ZERO;
cmp_eq = true;
goto jeq_common; goto jeq_common;
} }
emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
src = MIPS_R_AT; src = MIPS_R_AT;
dst = MIPS_R_ZERO; dst = MIPS_R_ZERO;
cmp_eq = true;
goto jeq_common; goto jeq_common;
case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_K:
cmp_eq = (BPF_OP(insn->code) == BPF_JGE); case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
cmp_eq = (bpf_op == BPF_JGE);
dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
if (dst < 0) if (dst < 0)
return dst; return dst;
/* /*
* only "LT" compare available, so we must use imm + 1 * only "LT" compare available, so we must use imm + 1
* to generate "GT" * to generate "GT" and imm -1 to generate LE
*/ */
t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1); if (bpf_op == BPF_JGT)
if (t64s >= 0 && t64s <= S16_MAX) { t64s = (u64)(u32)(insn->imm) + 1;
emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); else if (bpf_op == BPF_JLE)
src = MIPS_R_AT; t64s = (u64)(u32)(insn->imm) + 1;
dst = MIPS_R_ZERO; else
cmp_eq = true; t64s = (u64)(u32)(insn->imm);
goto jeq_common;
} cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;
emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
src = MIPS_R_AT; src = MIPS_R_AT;
dst = MIPS_R_ZERO; dst = MIPS_R_ZERO;
cmp_eq = true;
goto jeq_common; goto jeq_common;
case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
...@@ -1198,7 +1226,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, ...@@ -1198,7 +1226,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0) if (dst < 0)
return dst; return dst;
if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
b_off = b_imm(exit_idx, ctx); b_off = b_imm(exit_idx, ctx);
if (is_bad_offset(b_off)) if (is_bad_offset(b_off))
...@@ -1724,10 +1752,14 @@ static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, ...@@ -1724,10 +1752,14 @@ static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
case BPF_JEQ: case BPF_JEQ:
case BPF_JGT: case BPF_JGT:
case BPF_JGE: case BPF_JGE:
case BPF_JLT:
case BPF_JLE:
case BPF_JSET: case BPF_JSET:
case BPF_JNE: case BPF_JNE:
case BPF_JSGT: case BPF_JSGT:
case BPF_JSGE: case BPF_JSGE:
case BPF_JSLT:
case BPF_JSLE:
if (follow_taken) { if (follow_taken) {
rvt[idx] |= RVT_BRANCH_TAKEN; rvt[idx] |= RVT_BRANCH_TAKEN;
idx += insn->off; idx += insn->off;
...@@ -1853,6 +1885,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ...@@ -1853,6 +1885,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx)); memset(&ctx, 0, sizeof(ctx));
/*
 * Decide once, up front, whether bbit (branch-on-bit) instructions may
 * be emitted: only the Cavium Octeon CPU family has them.  Preemption
 * is disabled around current_cpu_type() — per the commit message this
 * avoids a WARN on preemptible kernels (presumably from
 * smp_processor_id(); confirm against current_cpu_type()'s
 * implementation).
 */
preempt_disable();
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
	ctx.use_bbit_insns = 1;
	/*
	 * BUG FIX: the original had no break here, so control fell
	 * through into default: and unconditionally cleared
	 * use_bbit_insns again — bbit instructions were never used.
	 */
	break;
default:
	ctx.use_bbit_insns = 0;
}
preempt_enable();
ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
if (ctx.offsets == NULL) if (ctx.offsets == NULL)
goto out_err; goto out_err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment