Commit 7e639208 authored by KP Singh, committed by Alexei Starovoitov

bpf: JIT helpers for fmod_ret progs

* Split the invoke_bpf program to prepare for special handling of
  fmod_ret programs introduced in a subsequent patch.
* Move the definition of emit_cond_near_jump and emit_nops as they are
  needed for fmod_ret.
* Refactor branch target alignment into its own generic helper function
  i.e. emit_align.
Signed-off-by: KP Singh <kpsingh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200304191853.1529-3-kpsingh@chromium.org
parent 88fd9e53
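The emit_align() helper named in the last bullet only rounds the current JIT emit pointer up to the requested boundary and fills the gap with NOPs, which is also why emit_nops() is moved ahead of it in the diff below. As a rough orientation, here is a minimal userspace sketch of that idea. It is not the kernel code: it uses plain 0x90 single-byte NOPs and an open-coded round-up in place of ideal_nops[] and PTR_ALIGN(), and all *_sketch names are made up for illustration.

/*
 * Standalone sketch (userspace, not kernel code) of the idea behind the new
 * emit_align() helper: round the emit pointer up to an alignment boundary and
 * fill the gap with NOP bytes.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void emit_nops_sketch(uint8_t **pprog, unsigned int len)
{
	memset(*pprog, 0x90, len);	/* 0x90 = single-byte x86 NOP */
	*pprog += len;
}

static void emit_align_sketch(uint8_t **pprog, uintptr_t align)
{
	uintptr_t cur = (uintptr_t)*pprog;
	uintptr_t target = (cur + align - 1) & ~(align - 1);	/* round up */

	if (target != cur)
		emit_nops_sketch(pprog, target - cur);
}

int main(void)
{
	/* 64-byte buffer standing in for the JIT image. */
	static uint8_t image[64] __attribute__((aligned(16)));
	uint8_t *prog = image + 5;	/* pretend 5 bytes were already emitted */

	emit_align_sketch(&prog, 16);
	printf("padded %td NOP bytes, prog now at offset %td\n",
	       prog - (image + 5), prog - image);	/* 11 NOPs, offset 16 */
	return 0;
}

In the dispatcher this collapses to a single emit_align(&prog, 16) call, satisfying the Intel coding rule on 16-byte branch-target alignment quoted in the last hunk below.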
@@ -1361,13 +1361,12 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 			 -(stack_size - i * 8));
 }
 
-static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		      struct bpf_tramp_progs *tp, int stack_size)
+static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+			   struct bpf_prog *p, int stack_size)
 {
 	u8 *prog = *pprog;
-	int cnt = 0, i;
+	int cnt = 0;
 
-	for (i = 0; i < tp->nr_progs; i++) {
-		if (emit_call(&prog, __bpf_prog_enter, prog))
-			return -EINVAL;
-		/* remember prog start time returned by __bpf_prog_enter */
+	if (emit_call(&prog, __bpf_prog_enter, prog))
+		return -EINVAL;
+	/* remember prog start time returned by __bpf_prog_enter */
@@ -1376,21 +1375,82 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		/* arg1: lea rdi, [rbp - stack_size] */
-		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
-		/* arg2: progs[i]->insnsi for interpreter */
-		if (!tp->progs[i]->jited)
-			emit_mov_imm64(&prog, BPF_REG_2,
-				       (long) tp->progs[i]->insnsi >> 32,
-				       (u32) (long) tp->progs[i]->insnsi);
-		/* call JITed bpf program or interpreter */
-		if (emit_call(&prog, tp->progs[i]->bpf_func, prog))
-			return -EINVAL;
-
-		/* arg1: mov rdi, progs[i] */
-		emit_mov_imm64(&prog, BPF_REG_1, (long) tp->progs[i] >> 32,
-			       (u32) (long) tp->progs[i]);
-		/* arg2: mov rsi, rbx <- start time in nsec */
-		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
-		if (emit_call(&prog, __bpf_prog_exit, prog))
-			return -EINVAL;
+	/* arg1: lea rdi, [rbp - stack_size] */
+	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
+	/* arg2: progs[i]->insnsi for interpreter */
+	if (!p->jited)
+		emit_mov_imm64(&prog, BPF_REG_2,
+			       (long) p->insnsi >> 32,
+			       (u32) (long) p->insnsi);
+	/* call JITed bpf program or interpreter */
+	if (emit_call(&prog, p->bpf_func, prog))
+		return -EINVAL;
+
+	/* arg1: mov rdi, progs[i] */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
+		       (u32) (long) p);
+	/* arg2: mov rsi, rbx <- start time in nsec */
+	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+	if (emit_call(&prog, __bpf_prog_exit, prog))
+		return -EINVAL;
+
+	*pprog = prog;
+	return 0;
+}
+
+static void emit_nops(u8 **pprog, unsigned int len)
+{
+	unsigned int i, noplen;
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	while (len > 0) {
+		noplen = len;
+
+		if (noplen > ASM_NOP_MAX)
+			noplen = ASM_NOP_MAX;
+
+		for (i = 0; i < noplen; i++)
+			EMIT1(ideal_nops[noplen][i]);
+		len -= noplen;
+	}
+
+	*pprog = prog;
+}
+
+static void emit_align(u8 **pprog, u32 align)
+{
+	u8 *target, *prog = *pprog;
+
+	target = PTR_ALIGN(prog, align);
+	if (target != prog)
+		emit_nops(&prog, target - prog);
+
+	*pprog = prog;
+}
+
+static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+	s64 offset;
+
+	offset = func - (ip + 2 + 4);
+	if (!is_simm32(offset)) {
+		pr_err("Target %p is out of range\n", func);
+		return -EINVAL;
+	}
+	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
+	*pprog = prog;
+	return 0;
+}
+
+static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
+		      struct bpf_tramp_progs *tp, int stack_size)
+{
+	int i;
+	u8 *prog = *pprog;
+
+	for (i = 0; i < tp->nr_progs; i++) {
+		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size))
+			return -EINVAL;
 	}
 	*pprog = prog;
 	return 0;
@@ -1531,42 +1591,6 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 	return prog - (u8 *)image;
 }
 
-static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
-{
-	u8 *prog = *pprog;
-	int cnt = 0;
-	s64 offset;
-
-	offset = func - (ip + 2 + 4);
-	if (!is_simm32(offset)) {
-		pr_err("Target %p is out of range\n", func);
-		return -EINVAL;
-	}
-	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
-	*pprog = prog;
-	return 0;
-}
-
-static void emit_nops(u8 **pprog, unsigned int len)
-{
-	unsigned int i, noplen;
-	u8 *prog = *pprog;
-	int cnt = 0;
-
-	while (len > 0) {
-		noplen = len;
-
-		if (noplen > ASM_NOP_MAX)
-			noplen = ASM_NOP_MAX;
-
-		for (i = 0; i < noplen; i++)
-			EMIT1(ideal_nops[noplen][i]);
-		len -= noplen;
-	}
-
-	*pprog = prog;
-}
-
 static int emit_fallback_jump(u8 **pprog)
 {
 	u8 *prog = *pprog;
@@ -1589,7 +1613,7 @@ static int emit_fallback_jump(u8 **pprog)
 
 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 {
-	u8 *jg_reloc, *jg_target, *prog = *pprog;
+	u8 *jg_reloc, *prog = *pprog;
 	int pivot, err, jg_bytes = 1, cnt = 0;
 	s64 jg_offset;
 
@@ -1644,9 +1668,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 	 * Coding Rule 11: All branch targets should be 16-byte
 	 * aligned.
 	 */
-	jg_target = PTR_ALIGN(prog, 16);
-	if (jg_target != prog)
-		emit_nops(&prog, jg_target - prog);
+	emit_align(&prog, 16);
 	jg_offset = prog - jg_reloc;
 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
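As a side note on emit_cond_near_jump() above: a conditional near jump on x86 is encoded as 0x0F, (short-jump opcode + 0x10), followed by a 32-bit displacement counted from the end of the 6-byte instruction, which is where the func - (ip + 2 + 4) expression and the is_simm32() range check come from. Below is a standalone userspace sketch of that encoding; the *_sketch names are illustrative, not kernel APIs.

/*
 * Userspace sketch (not kernel code) of the offset math in
 * emit_cond_near_jump(): two opcode bytes plus a rel32 displacement that is
 * relative to the address of the *next* instruction, i.e. ip + 2 + 4.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>

static bool is_simm32_sketch(int64_t value)
{
	return value == (int64_t)(int32_t)value;
}

/* Encode "jcc rel32" at ip, targeting func. Returns 0 on success. */
static int emit_cond_near_jump_sketch(uint8_t *ip, uint8_t *func, uint8_t jmp_cond)
{
	int64_t offset = func - (ip + 2 + 4);	/* relative to end of 6-byte insn */
	int32_t imm32;

	if (!is_simm32_sketch(offset))
		return -1;

	imm32 = (int32_t)offset;
	ip[0] = 0x0F;
	ip[1] = jmp_cond + 0x10;	/* e.g. 0x74 (je rel8) -> 0x84 (je rel32) */
	memcpy(&ip[2], &imm32, 4);
	return 0;
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	/* Jump from the start of buf to buf + 32 when ZF is set (JE). */
	if (emit_cond_near_jump_sketch(buf, buf + 32, 0x74) == 0)
		printf("encoded: 0f %02x, disp32 = %d\n", buf[1], 32 - 6);
	return 0;
}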