Commit fd49591c authored by Luke Nelson, committed by Will Deacon

bpf, arm64: Optimize AND,OR,XOR,JSET BPF_K using arm64 logical immediates

The current code for BPF_{AND,OR,XOR,JSET} BPF_K loads the immediate to
a temporary register before use.

This patch changes the code to avoid using a temporary register
when the BPF immediate is encodable using an arm64 logical immediate
instruction. If the encoding fails (due to the immediate not being
encodable), it falls back to using a temporary register.

Example of generated code for BPF_ALU32_IMM(BPF_AND, R0, 0x80000001):

without optimization:

  24: mov  w10, #0x8000ffff
  28: movk w10, #0x1
  2c: and  w7, w7, w10

with optimization:

  24: and  w7, w7, #0x80000001
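
An arm64 logical immediate is a "bitmask immediate": one contiguous run of
set bits, rotated by any amount and replicated across the register in a
power-of-two-sized element. 0x80000001 qualifies because, viewed circularly,
bits 31 and 0 form a single two-bit run. The following user-space sketch of
the encodability rule is illustrative only and not part of the commit; the
helper names is_logical_imm() and ror_bits() are made up here, and the
kernel's real encoder is aarch64_insn_gen_logical_immediate() in
arch/arm64/kernel/insn.c:

  #include <stdbool.h>
  #include <stdint.h>

  /* Rotate the low 'width' bits of 'v' right by 'rot'. */
  static uint64_t ror_bits(uint64_t v, unsigned int rot, unsigned int width)
  {
          uint64_t mask = (width == 64) ? ~0ULL : (1ULL << width) - 1;

          v &= mask;
          rot %= width;
          if (!rot)
                  return v;
          return ((v >> rot) | (v << (width - rot))) & mask;
  }

  /*
   * Illustrative only: true if 'imm' can be encoded as an AArch64 logical
   * (bitmask) immediate for a 32-bit (is64 == false) or 64-bit operation.
   */
  static bool is_logical_imm(uint64_t imm, bool is64)
  {
          unsigned int size = is64 ? 64 : 32;
          uint64_t mask = (size == 64) ? ~0ULL : (1ULL << size) - 1;

          imm &= mask;
          if (imm == 0 || imm == mask)
                  return false;   /* all-zeros and all-ones have no encoding */

          /* Shrink to the smallest element whose replication reproduces imm. */
          while (size > 2) {
                  unsigned int half = size / 2;
                  uint64_t m = (1ULL << half) - 1;

                  if (((imm >> half) & m) != (imm & m))
                          break;
                  size = half;
                  imm &= m;
          }

          /*
           * The element must be a rotation of one contiguous run of 1s:
           * counting the positions where a run of 1s ends (bit set, next
           * circular bit clear) must give exactly one run.
           */
          return __builtin_popcountll(imm & ~ror_bits(imm, 1, size)) == 1;
  }

With these assumptions, is_logical_imm(0x80000001, false) returns true (a
single AND instruction suffices), while is_logical_imm(0x80000005, false)
returns false (two separate runs of 1s), so that constant would still go
through a temporary register.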

Since the encoding process is quite complex, the JIT reuses the existing
functionality in arch/arm64/kernel/insn.c for encoding logical immediates
rather than duplicating it.
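
The BPF_JSET BPF_K path gets the same treatment. As an illustration (this
example is not from the commit), a 32-bit BPF_JMP_IMM(BPF_JSET, R0, 0x3, off)
can now be emitted as a single

  tst  w7, #0x3

before the conditional branch, instead of first moving 0x3 into a temporary
register and testing register against register.
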
Co-developed-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/r/20200508181547.24783-3-luke.r.nels@gmail.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 579d1b3f
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -189,4 +189,18 @@
 /* Rn & Rm; set condition flags */
 #define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
+/* Logical (immediate) */
+#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
+	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
+	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
+		A64_VARIANT(sf), Rn, Rd, imm64); \
+})
+/* Rd = Rn OP imm */
+#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
+#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
+#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
+#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
+/* Rn & imm; set condition flags */
+#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
 
 #endif /* _BPF_JIT_H */
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -356,6 +356,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
 	u8 jmp_cond, reg;
 	s32 jmp_offset;
+	u32 a64_insn;
 
 #define check_imm(bits, imm) do {				\
 	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
@@ -488,18 +489,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_ALU | BPF_AND | BPF_K:
 	case BPF_ALU64 | BPF_AND | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_AND(is64, dst, dst, tmp), ctx);
+		a64_insn = A64_AND_I(is64, dst, dst, imm);
+		if (a64_insn != AARCH64_BREAK_FAULT) {
+			emit(a64_insn, ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_AND(is64, dst, dst, tmp), ctx);
+		}
 		break;
 	case BPF_ALU | BPF_OR | BPF_K:
 	case BPF_ALU64 | BPF_OR | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_ORR(is64, dst, dst, tmp), ctx);
+		a64_insn = A64_ORR_I(is64, dst, dst, imm);
+		if (a64_insn != AARCH64_BREAK_FAULT) {
+			emit(a64_insn, ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_ORR(is64, dst, dst, tmp), ctx);
+		}
 		break;
 	case BPF_ALU | BPF_XOR | BPF_K:
 	case BPF_ALU64 | BPF_XOR | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_EOR(is64, dst, dst, tmp), ctx);
+		a64_insn = A64_EOR_I(is64, dst, dst, imm);
+		if (a64_insn != AARCH64_BREAK_FAULT) {
+			emit(a64_insn, ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_EOR(is64, dst, dst, tmp), ctx);
+		}
 		break;
 	case BPF_ALU | BPF_MUL | BPF_K:
 	case BPF_ALU64 | BPF_MUL | BPF_K:
@@ -628,8 +644,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		goto emit_cond_jmp;
 	case BPF_JMP | BPF_JSET | BPF_K:
 	case BPF_JMP32 | BPF_JSET | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_TST(is64, dst, tmp), ctx);
+		a64_insn = A64_TST_I(is64, dst, imm);
+		if (a64_insn != AARCH64_BREAK_FAULT) {
+			emit(a64_insn, ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_TST(is64, dst, tmp), ctx);
+		}
 		goto emit_cond_jmp;
 	/* function call */
 	case BPF_JMP | BPF_CALL: