Commit e2c95a61 authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf, ppc64: generalize fetching subprog into bpf_jit_get_func_addr

Make fetching of the BPF call address from the ppc64 JIT generic. ppc64
used a slightly different variant than going through the insn's imm
field encoding, since the target address would not fit into that space.
Instead, the target subprog number was encoded into the insn's off field
and the address fetched through fp->aux->func[off]->bpf_func. Given
there are other JITs with this issue and the mechanism of fetching the
address is JIT-generic, move it into the core as a helper. On the JIT
side, we get information on whether the retrieved address is a fixed
one, that is, not changing through JIT passes, or a dynamic one. For
the former, JITs can optimize their imm emission because this doesn't
change jump offsets throughout the JIT process.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Sandipan Das <sandipan@linux.ibm.com>
Tested-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 69500127
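For context, here is a minimal sketch of how an arch JIT consumes the new
helper in its BPF_JMP | BPF_CALL handling; it mirrors the ppc64 hunk that
follows. struct jit_ctx, emit_call_fixed() and emit_call_rel() are
hypothetical stand-ins for arch-specific state and emitters, not part of
this patch.

/*
 * Sketch only: a generic JIT back end's call handling on top of the
 * new core helper. emit_call_fixed()/emit_call_rel() and struct jit_ctx
 * are hypothetical arch-specific pieces.
 */
static int jit_emit_call(struct jit_ctx *ctx, const struct bpf_prog *fp,
			 const struct bpf_insn *insn, bool extra_pass)
{
	bool func_addr_fixed;
	u64 func_addr;
	int ret;

	ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
				    &func_addr, &func_addr_fixed);
	if (ret < 0)
		return ret;

	if (func_addr_fixed)
		/* Helper address: stable across JIT passes, so a compact
		 * immediate sequence can be emitted without shifting
		 * jump offsets between passes.
		 */
		emit_call_fixed(ctx, func_addr);
	else
		/* Subprog address: may still be a place-holder (NULL)
		 * until the extra pass, so a fixed-length sequence is
		 * emitted to keep offsets stable.
		 */
		emit_call_rel(ctx, func_addr);
	return 0;
}

The split lets a back end pick a shorter immediate load for helper calls
while reserving a worst-case-length sequence for subprog calls whose
addresses only settle in the extra pass.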
arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
+				       u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+	/* func points to the function descriptor */
+	PPC_LI64(b2p[TMP_REG_2], func);
+	/* Load actual entry point from function descriptor */
+	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+	/* ... and move it to LR */
+	PPC_MTLR(b2p[TMP_REG_1]);
+	/*
+	 * Load TOC from function descriptor at offset 8.
+	 * We can clobber r2 since we get called through a
+	 * function pointer (so caller will save/restore r2)
+	 * and since we don't use a TOC ourself.
+	 */
+	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+	/* We can clobber r12 */
+	PPC_FUNC_ADDR(12, func);
+	PPC_MTLR(12);
+#endif
+	PPC_BLRL();
+}
+
+static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
+				       u64 func)
 {
 	unsigned int i, ctx_idx = ctx->idx;
 
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 {
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
-	int i;
+	int i, ret;
 
 	/* Start of epilogue code - will only be valid 2nd pass onwards */
 	u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u32 src_reg = b2p[insn[i].src_reg];
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
+		bool func_addr_fixed;
+		u64 func_addr;
 		u64 imm64;
-		u8 *func;
 		u32 true_cond;
 		u32 tmp_idx;
@@ -711,23 +738,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		case BPF_JMP | BPF_CALL:
 			ctx->seen |= SEEN_FUNC;
 
-			/* bpf function call */
-			if (insn[i].src_reg == BPF_PSEUDO_CALL)
-				if (!extra_pass)
-					func = NULL;
-				else if (fp->aux->func && off < fp->aux->func_cnt)
-					/* use the subprog id from the off
-					 * field to lookup the callee address
-					 */
-					func = (u8 *) fp->aux->func[off]->bpf_func;
-				else
-					return -EINVAL;
-			/* kernel helper call */
-			else
-				func = (u8 *) __bpf_call_base + imm;
-
-			bpf_jit_emit_func_call(image, ctx, (u64)func);
+			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+						    &func_addr, &func_addr_fixed);
+			if (ret < 0)
+				return ret;
+
+			if (func_addr_fixed)
+				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+			else
+				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 
 			/* move return value from r3 to BPF_REG_0 */
 			PPC_MR(b2p[BPF_REG_0], 3);
 			break;
...
include/linux/filter.h
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 void bpf_jit_free(struct bpf_prog *fp);
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed);
+
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
...
kernel/bpf/core.c
@@ -672,6 +672,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed)
+{
+	s16 off = insn->off;
+	s32 imm = insn->imm;
+	u8 *addr;
+
+	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+	if (!*func_addr_fixed) {
+		/* Place-holder address till the last pass has collected
+		 * all addresses for JITed subprograms in which case we
+		 * can pick them up from prog->aux.
+		 */
+		if (!extra_pass)
+			addr = NULL;
+		else if (prog->aux->func &&
+			 off >= 0 && off < prog->aux->func_cnt)
+			addr = (u8 *)prog->aux->func[off]->bpf_func;
+		else
+			return -EINVAL;
+	} else {
+		/* Address of a BPF helper call. Since part of the core
+		 * kernel, it's always at a fixed location. __bpf_call_base
+		 * and the helper with imm relative to it are both in core
+		 * kernel.
+		 */
+		addr = (u8 *)__bpf_call_base + imm;
+	}
+
+	*func_addr = (unsigned long)addr;
+	return 0;
+}
+
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
 			      const struct bpf_insn *aux,
 			      struct bpf_insn *to_buff)
...