Commit 93143f84 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-11-27

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix several bugs in the sparc BPF JIT: convergence for fused
   branches, initialization of the frame pointer register, and moving
   all arguments into output registers from input registers in the
   prologue to fix BPF-to-BPF calls, from David.

2) Fix a bug in the arm64 JIT when fetching BPF-to-BPF call addresses:
   they are not guaranteed to fit into the imm field and must therefore
   be retrieved through prog aux data, from Daniel.

3) Explicitly add all JITs to the MAINTAINERS file, with developers
   able to help out in feature development, fixes, review, etc.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 69500127 2b9034b5
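The common thread in items 1) and 2) is the new bpf_jit_get_func_addr()
helper added in kernel/bpf/core.c at the end of this diff. As a minimal
sketch (not code from this commit: emit_mov_imm64(), emit_mov_addr64() and
emit_indirect_call() are hypothetical stand-ins for per-arch emitters such
as arm64's emit_a64_mov_i64(), emit_addr_mov_i64() and A64_BLR), a JIT's
BPF_JMP | BPF_CALL case would consume it like this:

	static int jit_emit_bpf_call(struct jit_ctx *ctx,
				     const struct bpf_insn *insn,
				     bool extra_pass)
	{
		bool func_addr_fixed;
		u64 func_addr;
		int ret;

		/* Helpers live at a fixed address (imm relative to
		 * __bpf_call_base); subprog addresses are only known once
		 * the extra pass runs, so earlier passes get a place-holder.
		 */
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		if (func_addr_fixed)
			/* Stable value: a size-optimized load is fine. */
			emit_mov_imm64(ctx, func_addr);
		else
			/* Value changes between passes: emit a
			 * constant-length sequence so image size converges.
			 */
			emit_mov_addr64(ctx, func_addr);

		return emit_indirect_call(ctx);
	}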
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -2801,7 +2801,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
 Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
 S: Supported
-F: arch/x86/net/bpf_jit*
+F: arch/*/net/*
 F: Documentation/networking/filter.txt
 F: Documentation/bpf/
 F: include/linux/bpf*
@@ -2821,6 +2821,67 @@ F: tools/bpf/
 F: tools/lib/bpf/
 F: tools/testing/selftests/bpf/
 
+BPF JIT for ARM
+M: Shubham Bansal <illusionist.neo@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/arm/net/
+
+BPF JIT for ARM64
+M: Daniel Borkmann <daniel@iogearbox.net>
+M: Alexei Starovoitov <ast@kernel.org>
+M: Zi Shen Lim <zlim.lnx@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: arch/arm64/net/
+
+BPF JIT for MIPS (32-BIT AND 64-BIT)
+M: Paul Burton <paul.burton@mips.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/mips/net/
+
+BPF JIT for NFP NICs
+M: Jakub Kicinski <jakub.kicinski@netronome.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/netronome/nfp/bpf/
+
+BPF JIT for POWERPC (32-BIT AND 64-BIT)
+M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+M: Sandipan Das <sandipan@linux.ibm.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/powerpc/net/
+
+BPF JIT for S390
+M: Martin Schwidefsky <schwidefsky@de.ibm.com>
+M: Heiko Carstens <heiko.carstens@de.ibm.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/s390/net/
+X: arch/s390/net/pnet.c
+
+BPF JIT for SPARC (32-BIT AND 64-BIT)
+M: David S. Miller <davem@davemloft.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/sparc/net/
+
+BPF JIT for X86 32-BIT
+M: Wang YanQing <udknight@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/x86/net/bpf_jit_comp32.c
+
+BPF JIT for X86 64-BIT
+M: Alexei Starovoitov <ast@kernel.org>
+M: Daniel Borkmann <daniel@iogearbox.net>
+L: netdev@vger.kernel.org
+S: Supported
+F: arch/x86/net/
+X: arch/x86/net/bpf_jit_comp32.c
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M: Michael Chan <michael.chan@broadcom.com>
 L: netdev@vger.kernel.org
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
@@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx)
  * >0 - successfully JITed a 16-byte eBPF instruction.
  * <0 - failed to JIT.
  */
-static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+		      bool extra_pass)
 {
 	const u8 code = insn->code;
 	const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +626,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_JMP | BPF_CALL:
 	{
 		const u8 r0 = bpf2a64[BPF_REG_0];
-		const u64 func = (u64)__bpf_call_base + imm;
+		bool func_addr_fixed;
+		u64 func_addr;
+		int ret;
 
-		if (ctx->prog->is_func)
-			emit_addr_mov_i64(tmp, func, ctx);
+		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+					    &func_addr, &func_addr_fixed);
+		if (ret < 0)
+			return ret;
+		if (func_addr_fixed)
+			/* We can use optimized emission here. */
+			emit_a64_mov_i64(tmp, func_addr, ctx);
 		else
-			emit_a64_mov_i64(tmp, func, ctx);
+			emit_addr_mov_i64(tmp, func_addr, ctx);
 		emit(A64_BLR(tmp), ctx);
 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
@@ -753,7 +761,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	return 0;
 }
 
-static int build_body(struct jit_ctx *ctx)
+static int build_body(struct jit_ctx *ctx, bool extra_pass)
 {
 	const struct bpf_prog *prog = ctx->prog;
 	int i;
@@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx)
 		const struct bpf_insn *insn = &prog->insnsi[i];
 		int ret;
 
-		ret = build_insn(insn, ctx);
+		ret = build_insn(insn, ctx, extra_pass);
 		if (ret > 0) {
 			i++;
 			if (ctx->image == NULL)
@@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 1. Initial fake pass to compute ctx->idx. */
 
 	/* Fake pass to fill in ctx->offset. */
-	if (build_body(&ctx)) {
+	if (build_body(&ctx, extra_pass)) {
 		prog = orig_prog;
 		goto out_off;
 	}
@@ -888,7 +896,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	build_prologue(&ctx, was_classic);
 
-	if (build_body(&ctx)) {
+	if (build_body(&ctx, extra_pass)) {
 		bpf_jit_binary_free(header);
 		prog = orig_prog;
 		goto out_off;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
+				       u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+	/* func points to the function descriptor */
+	PPC_LI64(b2p[TMP_REG_2], func);
+	/* Load actual entry point from function descriptor */
+	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+	/* ... and move it to LR */
+	PPC_MTLR(b2p[TMP_REG_1]);
+	/*
+	 * Load TOC from function descriptor at offset 8.
+	 * We can clobber r2 since we get called through a
+	 * function pointer (so caller will save/restore r2)
+	 * and since we don't use a TOC ourself.
+	 */
+	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+	/* We can clobber r12 */
+	PPC_FUNC_ADDR(12, func);
+	PPC_MTLR(12);
+#endif
+	PPC_BLRL();
+}
+
+static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
+				       u64 func)
 {
 	unsigned int i, ctx_idx = ctx->idx;
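For reference, the ELFv1 function descriptor that bpf_jit_emit_func_call_hlp()
dereferences above has roughly this layout (illustrative struct, cf.
func_descr_t on powerpc; not part of the patch):

	struct elfv1_func_desc {
		u64 entry;	/* code address: loaded from offset 0 into TMP_REG_1 */
		u64 toc;	/* TOC pointer:  loaded from offset 8 into r2 */
		u64 env;	/* environment pointer, unused by the JIT */
	};

On ELFv2 (the #else branch) there are no descriptors; the entry address itself
is placed in r12, the register the ABI expects to hold the target of an
indirect call.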
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 {
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
-	int i;
+	int i, ret;
 
 	/* Start of epilogue code - will only be valid 2nd pass onwards */
 	u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u32 src_reg = b2p[insn[i].src_reg];
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
+		bool func_addr_fixed;
+		u64 func_addr;
 		u64 imm64;
-		u8 *func;
 		u32 true_cond;
 		u32 tmp_idx;
@@ -711,23 +738,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		case BPF_JMP | BPF_CALL:
 			ctx->seen |= SEEN_FUNC;
 
-			/* bpf function call */
-			if (insn[i].src_reg == BPF_PSEUDO_CALL)
-				if (!extra_pass)
-					func = NULL;
-				else if (fp->aux->func && off < fp->aux->func_cnt)
-					/* use the subprog id from the off
-					 * field to lookup the callee address
-					 */
-					func = (u8 *) fp->aux->func[off]->bpf_func;
-				else
-					return -EINVAL;
-			/* kernel helper call */
-			else
-				func = (u8 *) __bpf_call_base + imm;
-
-			bpf_jit_emit_func_call(image, ctx, (u64)func);
+			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+						    &func_addr, &func_addr_fixed);
+			if (ret < 0)
+				return ret;
+
+			if (func_addr_fixed)
+				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+			else
+				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 
 			/* move return value from r3 to BPF_REG_0 */
 			PPC_MR(b2p[BPF_REG_0], 3);
 			break;
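The split into _hlp and _rel matters for pass stability: a helper address
never changes between JIT passes, so the descriptor-based absolute call is
safe, while a subprog address is a place-holder until the extra pass and must
be loaded by a sequence whose length does not depend on the value. A hedged
sketch of such a constant-length load (the real logic lives in
bpf_jit_emit_func_call_rel() above; PPC_LI64() emits between 2 and 5
instructions depending on the value, so it is padded to the maximum):

	PPC_LI64(b2p[TMP_REG_2], func);
	/* Pad with nops up to the longest encoding so that every
	 * pass emits the same number of instructions here, even
	 * while func is still a place-holder.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();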
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
@@ -791,7 +791,7 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
 }
 
 /* Just skip the save instruction and the ctx register move. */
-#define BPF_TAILCALL_PROLOGUE_SKIP	16
+#define BPF_TAILCALL_PROLOGUE_SKIP	32
 #define BPF_TAILCALL_CNT_SP_OFF		(STACK_BIAS + 128)
 
 static void build_prologue(struct jit_ctx *ctx)
@@ -824,9 +824,15 @@ static void build_prologue(struct jit_ctx *ctx)
 		const u8 vfp = bpf2sparc[BPF_REG_FP];
 
 		emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
+	} else {
+		emit_nop(ctx);
 	}
 
 	emit_reg_move(I0, O0, ctx);
+	emit_reg_move(I1, O1, ctx);
+	emit_reg_move(I2, O2, ctx);
+	emit_reg_move(I3, O3, ctx);
+	emit_reg_move(I4, O4, ctx);
 
 	/* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */
 }
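Two things grew the fixed prologue here: the nop in the else branch keeps both
sides of the frame-pointer conditional the same length, and the I1-I4 moves
make all five BPF arguments visible in the output registers, which
BPF-to-BPF calls need (on sparc the callee's register window rotation turns
the caller's %o registers into its %i registers). Tail calls enter a program
just past this fixed region, hence the matching bump of
BPF_TAILCALL_PROLOGUE_SKIP from 16 to 32 bytes at the top of this file.
Conceptually (instruction accounting assumed, sparc instructions being 4
bytes each):

	/* save %sp, -frame, %sp          <- tail call skips this       */
	/* ... fixed setup, fp-or-nop, mov %i0-%i4 -> %o0-%o4 ...       */
	/* first BPF insn                 <- tail call lands here, i.e.
	 *                                   BPF_TAILCALL_PROLOGUE_SKIP
	 *                                   bytes from program start   */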
@@ -1270,6 +1276,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const u8 tmp2 = bpf2sparc[TMP_REG_2];
 		u32 opcode = 0, rs2;
 
+		if (insn->dst_reg == BPF_REG_FP)
+			ctx->saw_frame_pointer = true;
+
 		ctx->tmp_2_used = true;
 		emit_loadimm(imm, tmp2, ctx);
@@ -1308,6 +1317,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const u8 tmp = bpf2sparc[TMP_REG_1];
 		u32 opcode = 0, rs2;
 
+		if (insn->dst_reg == BPF_REG_FP)
+			ctx->saw_frame_pointer = true;
+
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
 			opcode = ST32;
@@ -1340,6 +1352,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const u8 tmp2 = bpf2sparc[TMP_REG_2];
 		const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+		if (insn->dst_reg == BPF_REG_FP)
+			ctx->saw_frame_pointer = true;
+
 		ctx->tmp_1_used = true;
 		ctx->tmp_2_used = true;
 		ctx->tmp_3_used = true;
@@ -1360,6 +1375,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const u8 tmp2 = bpf2sparc[TMP_REG_2];
 		const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+		if (insn->dst_reg == BPF_REG_FP)
+			ctx->saw_frame_pointer = true;
+
 		ctx->tmp_1_used = true;
 		ctx->tmp_2_used = true;
 		ctx->tmp_3_used = true;
@@ -1425,12 +1443,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
+	u32 prev_image_size, image_size;
 	bool tmp_blinded = false;
 	bool extra_pass = false;
 	struct jit_ctx ctx;
-	u32 image_size;
 	u8 *image_ptr;
-	int pass;
+	int pass, i;
 
 	if (!prog->jit_requested)
 		return orig_prog;
@@ -1461,28 +1479,53 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		header = jit_data->header;
 		extra_pass = true;
 		image_size = sizeof(u32) * ctx.idx;
+		prev_image_size = image_size;
+		pass = 1;
 		goto skip_init_ctx;
 	}
 
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
-	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
+	ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
 		goto out_off;
 	}
 
-	/* Fake pass to detect features used, and get an accurate assessment
-	 * of what the final image size will be.
+	/* Longest sequence emitted is for bswap32, 12 instructions.  Pre-cook
+	 * the offset array so that we converge faster.
 	 */
+	for (i = 0; i < prog->len; i++)
+		ctx.offset[i] = i * (12 * 4);
+
+	prev_image_size = ~0U;
+	for (pass = 1; pass < 40; pass++) {
+		ctx.idx = 0;
+
+		build_prologue(&ctx);
 	if (build_body(&ctx)) {
 		prog = orig_prog;
 		goto out_off;
 	}
-
-	build_prologue(&ctx);
 	build_epilogue(&ctx);
+
+		if (bpf_jit_enable > 1)
+			pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
+				ctx.idx * 4,
+				ctx.tmp_1_used ? '1' : ' ',
+				ctx.tmp_2_used ? '2' : ' ',
+				ctx.tmp_3_used ? '3' : ' ',
+				ctx.saw_frame_pointer ? 'F' : ' ',
+				ctx.saw_call ? 'C' : ' ',
+				ctx.saw_tail_call ? 'T' : ' ');
+
+		if (ctx.idx * 4 == prev_image_size)
+			break;
+		prev_image_size = ctx.idx * 4;
+		cond_resched();
+	}
 
+	/* Now we know the actual image size. */
 	image_size = sizeof(u32) * ctx.idx;
 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
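The loop above replaces the single size-estimation pass with a fixed-point
iteration: offsets are pre-seeded with the worst case (12 instructions per
eBPF instruction, the bswap32 maximum), each pass re-emits the image using
the previous pass's offsets, and the image has converged once two
consecutive passes produce the same size. The cap of 40 passes is a safety
bound, and cond_resched() keeps long programs preemptible. In outline
(regenerate_image() is a hypothetical stand-in for the
prologue/body/epilogue calls above):

	prev_image_size = ~0U;
	for (pass = 1; pass < 40; pass++) {
		/* re-emit using ctx.offset[] computed by the last pass */
		u32 size = regenerate_image(&ctx);

		if (size == prev_image_size)	/* no branch offset moved */
			break;
		prev_image_size = size;
		cond_resched();
	}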
@@ -1494,7 +1537,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	ctx.image = (u32 *)image_ptr;
 
 skip_init_ctx:
-	for (pass = 1; pass < 3; pass++) {
 		ctx.idx = 0;
 
 		build_prologue(&ctx);
@@ -1507,15 +1549,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	build_epilogue(&ctx);
 
-	if (bpf_jit_enable > 1)
-		pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass,
-			image_size - (ctx.idx * 4),
-			ctx.tmp_1_used ? '1' : ' ',
-			ctx.tmp_2_used ? '2' : ' ',
-			ctx.tmp_3_used ? '3' : ' ',
-			ctx.saw_frame_pointer ? 'F' : ' ',
-			ctx.saw_call ? 'C' : ' ',
-			ctx.saw_tail_call ? 'T' : ' ');
+	if (ctx.idx * 4 != prev_image_size) {
+		pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
+		       prev_image_size, ctx.idx * 4);
+		bpf_jit_binary_free(header);
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	if (bpf_jit_enable > 1)
diff --git a/include/linux/filter.h b/include/linux/filter.h
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 void bpf_jit_free(struct bpf_prog *fp);
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed);
+
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
@@ -672,6 +672,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed)
+{
+	s16 off = insn->off;
+	s32 imm = insn->imm;
+	u8 *addr;
+
+	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+	if (!*func_addr_fixed) {
+		/* Place-holder address till the last pass has collected
+		 * all addresses for JITed subprograms in which case we
+		 * can pick them up from prog->aux.
+		 */
+		if (!extra_pass)
+			addr = NULL;
+		else if (prog->aux->func &&
+			 off >= 0 && off < prog->aux->func_cnt)
+			addr = (u8 *)prog->aux->func[off]->bpf_func;
+		else
+			return -EINVAL;
+	} else {
+		/* Address of a BPF helper call. Since part of the core
+		 * kernel, it's always at a fixed location. __bpf_call_base
+		 * and the helper with imm relative to it are both in core
+		 * kernel.
+		 */
+		addr = (u8 *)__bpf_call_base + imm;
+	}
+
+	*func_addr = (unsigned long)addr;
+	return 0;
+}
+
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
 			      const struct bpf_insn *aux,
 			      struct bpf_insn *to_buff)