Commit bb392867 authored by Alexei Starovoitov

Merge branch 'ppc-fix'

Daniel Borkmann says:

====================
This set adds a ppc64 JIT fix for xadd as well as a missing test
case for verifying whether xadd messes with src/dst reg. Thanks!
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents f39f28ff fa47a16b
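
For background on the JIT change below: before this fix, a failed stwcx./stdcx. in the BPF_XADD case caused the ppc64 JIT to load 0 into BPF_REG_0 and jump to the exit path, silently terminating the program with a clobbered r0 (the removed alignment check used the same exit path). The fixed code instead records the byte offset of the lwarx/ldarx in tmp_idx and, via PPC_BCC_SHORT(COND_NE, tmp_idx), branches back to it whenever the store-conditional loses its reservation, i.e. the usual larx/stcx. retry loop. As a rough sketch only (not the emitter itself; the xadd32() helper and its inline asm are invented for this note), the emitted word-sized sequence behaves like:

/*
 * Illustrative sketch, not part of the patch: the retry loop the fixed
 * JIT emits for *(u32 *)(dst + off) += src, written as ppc64 inline asm.
 * The function name and operands are hypothetical.
 */
static inline void xadd32(unsigned int *addr, unsigned int val)
{
	unsigned int tmp;

	asm volatile(
"1:	lwarx	%0,0,%2\n"	/* load word and take reservation   */
"	add	%0,%0,%3\n"	/* add the increment                */
"	stwcx.	%0,0,%2\n"	/* store only if reservation held   */
"	bne-	1b\n"		/* reservation lost, retry from 1:  */
	: "=&r" (tmp), "+m" (*addr)
	: "r" (addr), "r" (val)
	: "cc");
}

The doubleword case is identical, with ldarx/stdcx. in place of lwarx/stwcx.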
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u64 imm64;
 		u8 *func;
 		u32 true_cond;
+		u32 tmp_idx;
 
 		/*
 		 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		case BPF_STX | BPF_XADD | BPF_W:
 			/* Get EA into TMP_REG_1 */
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not word-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			/* load value from memory into TMP_REG_2 */
 			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			/* add value from src_reg into this */
@@ -649,32 +646,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			/* store result back */
 			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 			/* we're done if this succeeded */
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			/* otherwise, let's try once more */
-			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			/* exit if the store was not successful */
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 		/* *(u64 *)(dst + off) += src */
 		case BPF_STX | BPF_XADD | BPF_DW:
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not doubleword-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
-			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+			tmp_idx = ctx->idx * 4;
 			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 
 		/*
...
@@ -12004,6 +12004,46 @@ static struct bpf_test tests[] = {
 		.errstr = "BPF_XADD stores into R2 packet",
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
+	{
+		"xadd/w check whether src/dst got mangled, 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 3,
+	},
+	{
+		"xadd/w check whether src/dst got mangled, 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 3,
+	},
 	{
 		"bpf_get_stack return R0 within range",
 		.insns = {
...
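
Both new selftests follow the same pattern: r0 is set to 1 and mirrored into r6, the frame pointer r10 is mirrored into r7, the value 1 is stored to the stack slot at fp-8, and two xadd instructions each add r0 (still 1) into that slot. The program then checks that neither the xadd source register (r6 == r0) nor the destination register (r7 == r10) got mangled, and returns the slot's final value, 1 + 1 + 1 = 3, matching .retval = 3; if either register was clobbered, it returns 42 instead. The first test exercises the doubleword (BPF_DW) variant, the second the word (BPF_W) variant.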