Commit cc88f540 authored by Xu Kuohai, committed by Daniel Borkmann

bpf, arm64: Support sign-extension load instructions

Add JIT support for sign-extension load instructions.
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Florent Revest <revest@chromium.org>
Acked-by: Florent Revest <revest@chromium.org>
Link: https://lore.kernel.org/bpf/20230815154158.717901-3-xukuohai@huaweicloud.com
parent 6c9f86d3
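
Background for the diff below: in the BPF cpuv4 instruction set, loads encoded with the BPF_MEMSX mode sign-extend the loaded byte, halfword or word into the full 64-bit destination register, while the existing BPF_MEM mode zero-extends. A minimal sketch of such an instruction, assuming the uapi struct bpf_insn layout and the BPF_MEMSX define from the cpuv4 headers; the initializer is illustrative only and not part of this patch:

#include <linux/bpf.h>

/* Illustrative only: dst = (s64)*(s8 *)(src + off).
 * With BPF_MEM instead of BPF_MEMSX the same load would zero-extend,
 * i.e. dst = (u64)*(u8 *)(src + off).
 */
static const struct bpf_insn ldxsb_example = {
	.code    = BPF_LDX | BPF_MEMSX | BPF_B,
	.dst_reg = BPF_REG_0,
	.src_reg = BPF_REG_1,
	.off     = 0,
	.imm     = 0,
};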
arch/arm64/net/bpf_jit.h
@@ -59,10 +59,13 @@
 		AARCH64_INSN_LDST_##type##_REG_OFFSET)
 #define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
 #define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
+#define A64_LDRSB(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 8, SIGNED_LOAD)
 #define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
 #define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
+#define A64_LDRSH(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 16, SIGNED_LOAD)
 #define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
 #define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
+#define A64_LDRSW(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 32, SIGNED_LOAD)
 #define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
 #define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
@@ -73,10 +76,13 @@
 		AARCH64_INSN_LDST_##type##_IMM_OFFSET)
 #define A64_STRBI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 8, STORE)
 #define A64_LDRBI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
+#define A64_LDRSBI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 8, SIGNED_LOAD)
 #define A64_STRHI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 16, STORE)
 #define A64_LDRHI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
+#define A64_LDRSHI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 16, SIGNED_LOAD)
 #define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
 #define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
+#define A64_LDRSWI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 32, SIGNED_LOAD)
 #define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
 #define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
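
The new SIGNED_LOAD macros take an X (64-bit) destination register because the A64 ldrsb/ldrsh/ldrsw instructions sign-extend the loaded value to the full register width, whereas the existing ldrb/ldrh and 32-bit ldr forms zero-extend into a W register. A small C illustration of the difference in the resulting 64-bit value (the helper names are hypothetical):

#include <stdint.h>

/* Roughly what a zero-extending byte load (ldrb Wt) leaves in a BPF register. */
static uint64_t load_byte_zero_extend(const void *p)
{
	return *(const uint8_t *)p;                    /* 0xff -> 0x00000000000000ff */
}

/* Roughly what a sign-extending byte load (ldrsb Xt) leaves in the register. */
static uint64_t load_byte_sign_extend(const void *p)
{
	return (uint64_t)(int64_t)*(const int8_t *)p;  /* 0xff -> 0xffffffffffffffff */
}

The Xt parameter name in the new macros reflects this: the result always occupies the full 64-bit register.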
arch/arm64/net/bpf_jit_comp.c
@@ -715,7 +715,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
 		/* First pass */
 		return 0;
 
-	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
+	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
 		return 0;
 
 	if (!ctx->prog->aux->extable ||
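
BPF_PROBE_MEM and BPF_PROBE_MEMSX mark loads that may legitimately fault at runtime, so the JIT records an exception-table entry for them; plain BPF_MEM/BPF_MEMSX accesses are proven safe by the verifier and need no fixup. A kernel-style sketch of the mode test above, with a hypothetical helper name:

/* Hypothetical helper: only probe-mode (may-fault) loads need a fixup entry;
 * ordinary BPF_MEM/BPF_MEMSX accesses are guaranteed safe by the verifier.
 */
static bool ldx_may_fault(const struct bpf_insn *insn)
{
	const u8 mode = BPF_MODE(insn->code);

	return mode == BPF_PROBE_MEM || mode == BPF_PROBE_MEMSX;
}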
@@ -779,6 +780,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	u8 dst_adj;
 	int off_adj;
 	int ret;
+	bool sign_extend;
 
 	switch (code) {
 	/* dst = src */
@@ -1122,7 +1124,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		return 1;
 	}
 
-	/* LDX: dst = *(size *)(src + off) */
+	/* LDX: dst = (u64)*(unsigned size *)(src + off) */
 	case BPF_LDX | BPF_MEM | BPF_W:
 	case BPF_LDX | BPF_MEM | BPF_H:
 	case BPF_LDX | BPF_MEM | BPF_B:
@@ -1131,6 +1133,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+	/* LDXS: dst_reg = (s64)*(signed size *)(src_reg + off) */
+	case BPF_LDX | BPF_MEMSX | BPF_B:
+	case BPF_LDX | BPF_MEMSX | BPF_H:
+	case BPF_LDX | BPF_MEMSX | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
 		if (ctx->fpb_offset > 0 && src == fp) {
 			src_adj = fpb;
 			off_adj = off + ctx->fpb_offset;
@@ -1138,29 +1147,49 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			src_adj = src;
 			off_adj = off;
 		}
+		sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
+				BPF_MODE(insn->code) == BPF_PROBE_MEMSX);
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
 			if (is_lsi_offset(off_adj, 2)) {
-				emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
+				if (sign_extend)
+					emit(A64_LDRSWI(dst, src_adj, off_adj), ctx);
+				else
+					emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
 			} else {
 				emit_a64_mov_i(1, tmp, off, ctx);
-				emit(A64_LDR32(dst, src, tmp), ctx);
+				if (sign_extend)
+					emit(A64_LDRSW(dst, src, tmp), ctx);
+				else
+					emit(A64_LDR32(dst, src, tmp), ctx);
 			}
 			break;
 		case BPF_H:
 			if (is_lsi_offset(off_adj, 1)) {
-				emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
+				if (sign_extend)
+					emit(A64_LDRSHI(dst, src_adj, off_adj), ctx);
+				else
+					emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
 			} else {
 				emit_a64_mov_i(1, tmp, off, ctx);
-				emit(A64_LDRH(dst, src, tmp), ctx);
+				if (sign_extend)
+					emit(A64_LDRSH(dst, src, tmp), ctx);
+				else
+					emit(A64_LDRH(dst, src, tmp), ctx);
 			}
 			break;
 		case BPF_B:
 			if (is_lsi_offset(off_adj, 0)) {
-				emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
+				if (sign_extend)
+					emit(A64_LDRSBI(dst, src_adj, off_adj), ctx);
+				else
+					emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
 			} else {
 				emit_a64_mov_i(1, tmp, off, ctx);
-				emit(A64_LDRB(dst, src, tmp), ctx);
+				if (sign_extend)
+					emit(A64_LDRSB(dst, src, tmp), ctx);
+				else
+					emit(A64_LDRB(dst, src, tmp), ctx);
 			}
 			break;
 		case BPF_DW:
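
Taken together, the LDX path now chooses the signed or unsigned A64 load from the instruction's mode, using the scaled-immediate form when the (possibly fpb-adjusted) offset fits and otherwise materializing the offset in the tmp register first. A condensed sketch of that selection for the halfword case, mirroring but not copying the emitter code above:

/* Condensed sketch for BPF_SIZE(code) == BPF_H; dst, src, src_adj, off,
 * off_adj, tmp and ctx are the locals of build_insn() shown in the diff.
 */
sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
	       BPF_MODE(insn->code) == BPF_PROBE_MEMSX);

if (is_lsi_offset(off_adj, 1)) {
	/* ldrsh/ldrh Xd|Wd, [src_adj, #off_adj] */
	emit(sign_extend ? A64_LDRSHI(dst, src_adj, off_adj)
			 : A64_LDRHI(dst, src_adj, off_adj), ctx);
} else {
	/* offset does not fit the scaled immediate: tmp = off, then register-offset load */
	emit_a64_mov_i(1, tmp, off, ctx);
	emit(sign_extend ? A64_LDRSH(dst, src, tmp)
			 : A64_LDRH(dst, src, tmp), ctx);
}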