Commit 11c11d07 authored by Brendan Jackman, committed by Alexei Starovoitov

bpf: x86: Factor out emission of ModR/M for *(reg + off)

The case for JITing atomics is about to get more complicated. Let's
factor out some common code to make the review and result more
readable.

NB the atomics code doesn't yet use the new helper - a subsequent
patch will add its use as a side-effect of other changes.
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-2-jackmanb@google.com
parent bade5c55
@@ -681,6 +681,27 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
 	*pprog = prog;
 }
 
+/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
+static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (is_imm8(off)) {
+		/* 1-byte signed displacement.
+		 *
+		 * If off == 0 we could skip this and save one extra byte, but
+		 * special case of x86 R13 which always needs an offset is not
+		 * worth the hassle
+		 */
+		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
+	} else {
+		/* 4-byte signed displacement */
+		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
+	}
+	*pprog = prog;
+}
+
 /* LDX: dst_reg = *(u8*)(src_reg + off) */
 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 {
@@ -708,15 +729,7 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
 		break;
 	}
-	/*
-	 * If insn->off == 0 we can save one extra byte, but
-	 * special case of x86 R13 which always needs an offset
-	 * is not worth the hassle
-	 */
-	if (is_imm8(off))
-		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
-	else
-		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
+	emit_insn_suffix(&prog, src_reg, dst_reg, off);
 	*pprog = prog;
 }
@@ -751,10 +764,7 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
 		break;
 	}
-	if (is_imm8(off))
-		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
-	else
-		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
+	emit_insn_suffix(&prog, dst_reg, src_reg, off);
 	*pprog = prog;
 }
@@ -1240,11 +1250,8 @@ st:			if (is_imm8(insn->off))
 			goto xadd;
 		case BPF_STX | BPF_XADD | BPF_DW:
 			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
-xadd:			if (is_imm8(insn->off))
-				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
-			else
-				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
-					    insn->off);
+xadd:
+			emit_modrm_dstoff(&prog, dst_reg, src_reg, insn->off);
 			break;
 
 		/* call */
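
As background for the helper introduced above: it chooses between the two ModR/M forms for a register + displacement memory operand, mod=01 with a sign-extended 1-byte displacement when the offset fits in imm8, otherwise mod=10 with a full 4-byte displacement. The sketch below is a minimal standalone illustration of that decision, not the kernel's implementation; the fits_imm8/modrm/emit_suffix names, the raw x86 register numbers, and the test offsets are invented for the example, and the rsp/r13 encodings that need a SIB byte or a mandatory displacement (the "R13" note in the comment) are deliberately left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does the offset fit in a sign-extended 8-bit immediate? */
static bool fits_imm8(int value)
{
	return value <= 127 && value >= -128;
}

/* Build a ModR/M byte: mod in bits 7..6, reg in 5..3, r/m in 2..0 */
static uint8_t modrm(uint8_t mod, uint8_t reg, uint8_t rm)
{
	return (uint8_t)((mod << 6) | ((reg & 7) << 3) | (rm & 7));
}

/*
 * Emit ModR/M + displacement for *(ptr + off): mod=01 with a 1-byte
 * displacement when the offset fits in imm8, otherwise mod=10 with a
 * 4-byte little-endian displacement. Returns the number of bytes written.
 */
static int emit_suffix(uint8_t *buf, uint8_t val_reg, uint8_t ptr_reg, int off)
{
	if (fits_imm8(off)) {
		buf[0] = modrm(1, val_reg, ptr_reg);
		buf[1] = (uint8_t)off;
		return 2;
	}
	buf[0] = modrm(2, val_reg, ptr_reg);
	buf[1] = (uint8_t)(off & 0xff);
	buf[2] = (uint8_t)((off >> 8) & 0xff);
	buf[3] = (uint8_t)((off >> 16) & 0xff);
	buf[4] = (uint8_t)((off >> 24) & 0xff);
	return 5;
}

int main(void)
{
	static const int offs[] = { 8, -16, 512 };
	uint8_t buf[8];

	for (size_t i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
		/* value in rax (reg 0), pointer in rdi (reg 7) */
		int n = emit_suffix(buf, 0, 7, offs[i]);

		printf("off=%5d ->", offs[i]);
		for (int j = 0; j < n; j++)
			printf(" %02x", buf[j]);
		printf("\n");
	}
	return 0;
}

For off=8 this prints "47 08"; prefixing the REX.W + opcode bytes 48 8b gives mov rax, [rdi+8], the same byte pattern emit_ldx produces for a BPF_DW load once the suffix is appended. For off=512 the mod=10 form is used and the displacement grows to four bytes, which is exactly the size trade-off the helper's comment describes.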