Commit 49c3af43 authored by Naveen N. Rao, committed by Michael Ellerman

powerpc/bpf: Simplify bpf_to_ppc() and adopt it for powerpc64

Convert bpf_to_ppc() to a macro to simplify its usage, since
codegen_context is available in all places it is used. Adopt it for
powerpc64 as well, for uniformity, and get rid of the global b2p array.
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/09f0540ce3e0cd4120b5b33993b5e73b6ef9e979.1644834730.git.naveen.n.rao@linux.vnet.ibm.com
parent 3a3fc9bf
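
In short: the per-subarch global b2p[] array becomes a field of struct codegen_context, filled in by the new bpf_jit_init_reg_mapping() helper, and bpf_to_ppc() becomes a macro that picks up a ctx variable from the enclosing scope. A minimal standalone sketch of that pattern follows (simplified types; MAX_BPF_JIT_REG and the register numbers are illustrative stand-ins, not the kernel's definitions):

/*
 * Sketch of the pattern adopted by this commit; compiles standalone.
 * The value of MAX_BPF_JIT_REG and the register numbers below are
 * stand-ins, not the kernel's actual mapping.
 */
#include <stdio.h>

#define MAX_BPF_JIT_REG	12

struct codegen_context {
	/* per-context BPF-to-ppc register map, sized for two temp regs */
	int b2p[MAX_BPF_JIT_REG + 2];
};

/* As in the patch: the macro expects a variable named 'ctx' in scope. */
#define bpf_to_ppc(r)	(ctx->b2p[r])

/* Each subarch provides its own initializer; values here are made up. */
static void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	ctx->b2p[0] = 8;	/* e.g. BPF_REG_0 -> r8 */
	ctx->b2p[1] = 3;	/* e.g. BPF_REG_1 -> r3 */
}

int main(void)
{
	struct codegen_context cgctx = { { 0 } };
	struct codegen_context *ctx = &cgctx;	/* satisfies the macro */

	bpf_jit_init_reg_mapping(ctx);
	printf("BPF_REG_0 maps to r%d\n", bpf_to_ppc(0));
	return 0;
}

Making the macro rely on an implicit ctx keeps call sites short (bpf_to_ppc(BPF_REG_0) rather than threading ctx through every use), at the cost of requiring each user to name its context variable ctx.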
@@ -119,12 +119,6 @@
 #define SEEN_FUNC	0x20000000 /* might call external helpers */
 #define SEEN_TAILCALL	0x40000000 /* uses tail calls */
 
-#ifdef CONFIG_PPC64
-extern const int b2p[MAX_BPF_JIT_REG + 2];
-#else
-extern const int b2p[MAX_BPF_JIT_REG + 1];
-#endif
-
 struct codegen_context {
 	/*
 	 * This is used to track register usage as well
@@ -138,11 +132,13 @@ struct codegen_context {
 	unsigned int seen;
 	unsigned int idx;
 	unsigned int stack_size;
-	int b2p[ARRAY_SIZE(b2p)];
+	int b2p[MAX_BPF_JIT_REG + 2];
 	unsigned int exentry_idx;
 	unsigned int alt_exit_addr;
 };
 
+#define bpf_to_ppc(r)	(ctx->b2p[r])
+
 #ifdef CONFIG_PPC32
 #define BPF_FIXUP_LEN	3 /* Three instructions => 12 bytes */
 #else
@@ -170,6 +166,7 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
 	ctx->seen &= ~(1 << (31 - i));
 }
 
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
 int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
 		       u32 *addrs, int pass);
...
@@ -72,13 +72,13 @@ static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
 			tmp_idx = ctx->idx;
 			ctx->idx = addrs[i] / 4;
 #ifdef CONFIG_PPC32
-			PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
-			PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
+			PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
+			PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
 			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
 				EMIT(PPC_RAW_NOP());
 #else
 			func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
-			PPC_LI64(b2p[insn[i].dst_reg], func_addr);
+			PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
 			/* overwrite rest with nops */
 			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
 				EMIT(PPC_RAW_NOP());
@@ -179,7 +179,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	}
 
 	memset(&cgctx, 0, sizeof(struct codegen_context));
-	memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p));
+	bpf_jit_init_reg_mapping(&cgctx);
 
 	/* Make sure that the stack is quadword aligned. */
 	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
...
@@ -33,42 +33,38 @@
 /* stack frame, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
 
-/* BPF register usage */
-#define TMP_REG	(MAX_BPF_JIT_REG + 0)
-
 #define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
 
-/* PPC NVR range -- update this if we ever use NVRs below r17 */
-#define BPF_PPC_NVR_MIN		_R17
-#define BPF_PPC_TC		_R16
+/* BPF register usage */
+#define TMP_REG	(MAX_BPF_JIT_REG + 0)
 
 /* BPF to ppc register mappings */
-const int b2p[MAX_BPF_JIT_REG + 1] = {
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
+{
 	/* function return value */
-	[BPF_REG_0] = _R12,
+	ctx->b2p[BPF_REG_0] = _R12;
 	/* function arguments */
-	[BPF_REG_1] = _R4,
-	[BPF_REG_2] = _R6,
-	[BPF_REG_3] = _R8,
-	[BPF_REG_4] = _R10,
-	[BPF_REG_5] = _R22,
+	ctx->b2p[BPF_REG_1] = _R4;
+	ctx->b2p[BPF_REG_2] = _R6;
+	ctx->b2p[BPF_REG_3] = _R8;
+	ctx->b2p[BPF_REG_4] = _R10;
+	ctx->b2p[BPF_REG_5] = _R22;
 	/* non volatile registers */
-	[BPF_REG_6] = _R24,
-	[BPF_REG_7] = _R26,
-	[BPF_REG_8] = _R28,
-	[BPF_REG_9] = _R30,
+	ctx->b2p[BPF_REG_6] = _R24;
+	ctx->b2p[BPF_REG_7] = _R26;
+	ctx->b2p[BPF_REG_8] = _R28;
+	ctx->b2p[BPF_REG_9] = _R30;
 	/* frame pointer aka BPF_REG_10 */
-	[BPF_REG_FP] = _R18,
+	ctx->b2p[BPF_REG_FP] = _R18;
 	/* eBPF jit internal registers */
-	[BPF_REG_AX] = _R20,
-	[TMP_REG] = _R31,	/* 32 bits */
-};
-
-static int bpf_to_ppc(struct codegen_context *ctx, int reg)
-{
-	return ctx->b2p[reg];
+	ctx->b2p[BPF_REG_AX] = _R20;
+	ctx->b2p[TMP_REG] = _R31;	/* 32 bits */
 }
 
+/* PPC NVR range -- update this if we ever use NVRs below r17 */
+#define BPF_PPC_NVR_MIN		_R17
+#define BPF_PPC_TC		_R16
+
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {
 	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
@@ -118,8 +114,8 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	int i;
 
 	/* First arg comes in as a 32 bits pointer. */
-	EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
-	EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
+	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
+	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
 	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
 
 	/*
@@ -128,7 +124,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	 * invoked through a tail call.
 	 */
 	if (ctx->seen & SEEN_TAILCALL)
-		EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
+		EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_1) - 1, _R1,
 				 bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
 	else
 		EMIT(PPC_RAW_NOP());
@@ -150,15 +146,15 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
 
 	/* If needed retrieve arguments 9 and 10, ie 5th 64 bits arg.*/
-	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
-		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx)) + 8);
-		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx)) + 12);
+	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
+		EMIT(PPC_RAW_LWZ(bpf_to_ppc(BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx)) + 8);
+		EMIT(PPC_RAW_LWZ(bpf_to_ppc(BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx)) + 12);
 	}
 
 	/* Setup frame pointer to point to the bpf stack area */
-	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
-		EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
-		EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
+	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
+		EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
+		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
 	}
@@ -178,7 +174,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 
 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 {
-	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));
+	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
 
 	bpf_jit_emit_common_epilogue(image, ctx);
 
@@ -223,8 +219,8 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * r5-r6/BPF_REG_2 - pointer to bpf_array
 	 * r7-r8/BPF_REG_3 - index in bpf_array
 	 */
-	int b2p_bpf_array = bpf_to_ppc(ctx, BPF_REG_2);
-	int b2p_index = bpf_to_ppc(ctx, BPF_REG_3);
+	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
+	int b2p_index = bpf_to_ppc(BPF_REG_3);
 
 	/*
 	 * if (index >= array->map.max_entries)
@@ -270,7 +266,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	EMIT(PPC_RAW_MTCTR(_R3));
 
-	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));
+	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));
 
 	/* tear restore NVRs, ... */
 	bpf_jit_emit_common_epilogue(image, ctx);
@@ -294,11 +290,11 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 	for (i = 0; i < flen; i++) {
 		u32 code = insn[i].code;
-		u32 dst_reg = bpf_to_ppc(ctx, insn[i].dst_reg);
+		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
 		u32 dst_reg_h = dst_reg - 1;
-		u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg);
+		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
 		u32 src_reg_h = src_reg - 1;
-		u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG);
+		u32 tmp_reg = bpf_to_ppc(TMP_REG);
 		u32 size = BPF_SIZE(code);
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
@@ -960,17 +956,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			if (ret < 0)
 				return ret;
 
-			if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
-				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
-				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
+			if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
+				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
+				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
 			}
 
 			ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 			if (ret)
 				return ret;
 
-			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
-			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
+			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
+			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
 			break;
 
 		/*
...
@@ -46,27 +46,28 @@
 #define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
 
 /* BPF to ppc register mappings */
-const int b2p[MAX_BPF_JIT_REG + 2] = {
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
+{
 	/* function return value */
-	[BPF_REG_0] = _R8,
+	ctx->b2p[BPF_REG_0] = _R8;
 	/* function arguments */
-	[BPF_REG_1] = _R3,
-	[BPF_REG_2] = _R4,
-	[BPF_REG_3] = _R5,
-	[BPF_REG_4] = _R6,
-	[BPF_REG_5] = _R7,
+	ctx->b2p[BPF_REG_1] = _R3;
+	ctx->b2p[BPF_REG_2] = _R4;
+	ctx->b2p[BPF_REG_3] = _R5;
+	ctx->b2p[BPF_REG_4] = _R6;
+	ctx->b2p[BPF_REG_5] = _R7;
 	/* non volatile registers */
-	[BPF_REG_6] = _R27,
-	[BPF_REG_7] = _R28,
-	[BPF_REG_8] = _R29,
-	[BPF_REG_9] = _R30,
+	ctx->b2p[BPF_REG_6] = _R27;
+	ctx->b2p[BPF_REG_7] = _R28;
+	ctx->b2p[BPF_REG_8] = _R29;
+	ctx->b2p[BPF_REG_9] = _R30;
 	/* frame pointer aka BPF_REG_10 */
-	[BPF_REG_FP] = _R31,
+	ctx->b2p[BPF_REG_FP] = _R31;
 	/* eBPF jit internal registers */
-	[BPF_REG_AX] = _R12,
-	[TMP_REG_1] = _R9,
-	[TMP_REG_2] = _R10
-};
+	ctx->b2p[BPF_REG_AX] = _R12;
+	ctx->b2p[TMP_REG_1] = _R9;
+	ctx->b2p[TMP_REG_2] = _R10;
+}
 
 /* PPC NVR range -- update this if we ever use NVRs below r27 */
 #define BPF_PPC_NVR_MIN		_R27
@@ -79,7 +80,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 	 * - the bpf program uses its stack area
 	 * The latter condition is deduced from the usage of BPF_REG_FP
 	 */
-	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
+	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
 }
 
 /*
@@ -134,9 +135,9 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	 * invoked through a tail call.
 	 */
 	if (ctx->seen & SEEN_TAILCALL) {
-		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
+		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 		/* this goes in the redzone */
-		EMIT(PPC_RAW_STD(b2p[TMP_REG_1], _R1, -(BPF_PPC_STACK_SAVE + 8)));
+		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
 	} else {
 		EMIT(PPC_RAW_NOP());
 		EMIT(PPC_RAW_NOP());
@@ -161,12 +162,12 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	 * in the protected zone below the previous stack frame
 	 */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, b2p[i]))
-			EMIT(PPC_RAW_STD(b2p[i], _R1, bpf_jit_stack_offsetof(ctx, b2p[i])));
+		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
 	/* Setup frame pointer to point to the bpf stack area */
-	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
-		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], _R1,
+	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
 }
@@ -176,8 +177,8 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 	/* Restore NVRs */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, b2p[i]))
-			EMIT(PPC_RAW_LD(b2p[i], _R1, bpf_jit_stack_offsetof(ctx, b2p[i])));
+		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
@@ -194,7 +195,7 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	/* Move result to r3 */
-	EMIT(PPC_RAW_MR(_R3, b2p[BPF_REG_0]));
+	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
 
 	EMIT(PPC_RAW_BLR());
 }
@@ -261,8 +262,8 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * r4/BPF_REG_2 - pointer to bpf_array
 	 * r5/BPF_REG_3 - index in bpf_array
 	 */
-	int b2p_bpf_array = b2p[BPF_REG_2];
-	int b2p_index = b2p[BPF_REG_3];
+	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
+	int b2p_index = bpf_to_ppc(BPF_REG_3);
 	int bpf_tailcall_prologue_size = 8;
 
 	if (__is_defined(PPC64_ELF_ABI_v2))
@@ -272,42 +273,42 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * if (index >= array->map.max_entries)
 	 *   goto out;
 	 */
-	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
-	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
+	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
 	PPC_BCC_SHORT(COND_GE, out);
 
 	/*
 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 	 *   goto out;
 	 */
-	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R1, bpf_jit_stack_tailcallcnt(ctx)));
-	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
 	PPC_BCC_SHORT(COND_GE, out);
 
 	/*
 	 * tail_call_cnt++;
 	 */
-	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
-	EMIT(PPC_RAW_STD(b2p[TMP_REG_1], _R1, bpf_jit_stack_tailcallcnt(ctx)));
+	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 
 	/* prog = array->ptrs[index]; */
-	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
-	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
-	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)));
+	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
 
 	/*
 	 * if (prog == NULL)
 	 *   goto out;
 	 */
-	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
+	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
 	PPC_BCC_SHORT(COND_EQ, out);
 
 	/* goto *(prog->bpf_func + prologue_size); */
-	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)));
-	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
 			  FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
-	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
+	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
 
 	/* tear down stack, restore NVRs, ... */
 	bpf_jit_emit_common_epilogue(image, ctx);
@@ -354,11 +355,11 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 	for (i = 0; i < flen; i++) {
 		u32 code = insn[i].code;
-		u32 dst_reg = b2p[insn[i].dst_reg];
-		u32 src_reg = b2p[insn[i].src_reg];
+		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
+		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
 		u32 size = BPF_SIZE(code);
-		u32 tmp1_reg = b2p[TMP_REG_1];
-		u32 tmp2_reg = b2p[TMP_REG_2];
+		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
 		bool func_addr_fixed;
@@ -938,7 +939,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 				return ret;
 
 			/* move return value from r3 to BPF_REG_0 */
-			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], _R3));
+			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
 			break;
 
 		/*
...