Commit 43d636f8 authored by Naveen N. Rao's avatar Naveen N. Rao Committed by Michael Ellerman

powerpc64/bpf elfv1: Do not load TOC before calling functions

BPF helpers always reside in the core kernel, and all BPF programs use the
kernel TOC. As such, there is no need to load the TOC before calling
helpers or other BPF functions. Drop the code that does so.

Add a check to ensure we don't proceed if this assumption ever changes
in future.
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/a3cd3da4d24d95d845cd10382b1af083600c9074.1644834730.git.naveen.n.rao@linux.vnet.ibm.com
parent b10cb163
...@@ -178,7 +178,7 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i) ...@@ -178,7 +178,7 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
ctx->seen &= ~(1 << (31 - i)); ctx->seen &= ~(1 << (31 - i));
} }
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func); int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
u32 *addrs, int pass); u32 *addrs, int pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx); void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
......
...@@ -59,7 +59,9 @@ static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image, ...@@ -59,7 +59,9 @@ static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
*/ */
tmp_idx = ctx->idx; tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4; ctx->idx = addrs[i] / 4;
bpf_jit_emit_func_call_rel(image, ctx, func_addr); ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
if (ret)
return ret;
/* /*
* Restore ctx->idx here. This is safe as the length * Restore ctx->idx here. This is safe as the length
......
...@@ -193,7 +193,7 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) ...@@ -193,7 +193,7 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR()); EMIT(PPC_RAW_BLR());
} }
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func) int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{ {
s32 rel = (s32)func - (s32)(image + ctx->idx); s32 rel = (s32)func - (s32)(image + ctx->idx);
...@@ -209,6 +209,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun ...@@ -209,6 +209,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
EMIT(PPC_RAW_MTCTR(_R0)); EMIT(PPC_RAW_MTCTR(_R0));
EMIT(PPC_RAW_BCTRL()); EMIT(PPC_RAW_BCTRL());
} }
return 0;
} }
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
...@@ -961,7 +963,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * ...@@ -961,7 +963,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12)); EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
} }
bpf_jit_emit_func_call_rel(image, ctx, func_addr); ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
if (ret)
return ret;
EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3)); EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4)); EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
......
...@@ -147,9 +147,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) ...@@ -147,9 +147,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR()); EMIT(PPC_RAW_BLR());
} }
static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
u64 func)
{ {
unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
return -EINVAL;
#ifdef PPC64_ELF_ABI_v1 #ifdef PPC64_ELF_ABI_v1
/* func points to the function descriptor */ /* func points to the function descriptor */
PPC_LI64(b2p[TMP_REG_2], func); PPC_LI64(b2p[TMP_REG_2], func);
...@@ -157,25 +161,23 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, ...@@ -157,25 +161,23 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
/* ... and move it to CTR */ /* ... and move it to CTR */
EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1])); EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
/*
* Load TOC from function descriptor at offset 8.
* We can clobber r2 since we get called through a
* function pointer (so caller will save/restore r2)
* and since we don't use a TOC ourself.
*/
PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else #else
/* We can clobber r12 */ /* We can clobber r12 */
PPC_FUNC_ADDR(12, func); PPC_FUNC_ADDR(12, func);
EMIT(PPC_RAW_MTCTR(12)); EMIT(PPC_RAW_MTCTR(12));
#endif #endif
EMIT(PPC_RAW_BCTRL()); EMIT(PPC_RAW_BCTRL());
return 0;
} }
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func) int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{ {
unsigned int i, ctx_idx = ctx->idx; unsigned int i, ctx_idx = ctx->idx;
if (WARN_ON_ONCE(func && is_module_text_address(func)))
return -EINVAL;
/* Load function address into r12 */ /* Load function address into r12 */
PPC_LI64(12, func); PPC_LI64(12, func);
...@@ -193,19 +195,14 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun ...@@ -193,19 +195,14 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
EMIT(PPC_RAW_NOP()); EMIT(PPC_RAW_NOP());
#ifdef PPC64_ELF_ABI_v1 #ifdef PPC64_ELF_ABI_v1
/*
* Load TOC from function descriptor at offset 8.
* We can clobber r2 since we get called through a
* function pointer (so caller will save/restore r2)
* and since we don't use a TOC ourself.
*/
PPC_BPF_LL(2, 12, 8);
/* Load actual entry point from function descriptor */ /* Load actual entry point from function descriptor */
PPC_BPF_LL(12, 12, 0); PPC_BPF_LL(12, 12, 0);
#endif #endif
EMIT(PPC_RAW_MTCTR(12)); EMIT(PPC_RAW_MTCTR(12));
EMIT(PPC_RAW_BCTRL()); EMIT(PPC_RAW_BCTRL());
return 0;
} }
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
...@@ -890,9 +887,13 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * ...@@ -890,9 +887,13 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
return ret; return ret;
if (func_addr_fixed) if (func_addr_fixed)
bpf_jit_emit_func_call_hlp(image, ctx, func_addr); ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else else
bpf_jit_emit_func_call_rel(image, ctx, func_addr); ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
if (ret)
return ret;
/* move return value from r3 to BPF_REG_0 */ /* move return value from r3 to BPF_REG_0 */
EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3)); EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment