Commit 2d9206b2 authored by Hari Bathini, committed by Michael Ellerman

powerpc/bpf/32: Add instructions for atomic_[cmp]xchg

This adds two atomic opcodes, BPF_XCHG and BPF_CMPXCHG, on ppc32, both
of which include the BPF_FETCH flag. The kernel's atomic_cmpxchg
operation fundamentally has three operands, but the BPF instruction has
only two register fields. Therefore the operand we compare against (the
kernel's API calls it 'old') is hard-coded to be BPF_REG_R0. Also, the
kernel's atomic_cmpxchg returns the previous value at dst_reg + off;
JIT the same for BPF too, with the return value placed in BPF_REG_0.

  BPF_REG_R0 = atomic_cmpxchg(dst_reg + off, BPF_REG_R0, src_reg);
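For context, a minimal sketch of how a BPF program could exercise the new
ops, built with the BPF_ATOMIC_OP() macro from include/linux/filter.h. The
register choices, immediate values and the surrounding program are
illustrative assumptions, not part of this patch:

  /* Assumes BPF_REG_1 points at a writable 32-bit word. */
  struct bpf_insn insns[] = {
	BPF_MOV32_IMM(BPF_REG_0, 5),	/* 'old' value, must live in R0 */
	BPF_MOV32_IMM(BPF_REG_2, 10),	/* 'new' value */
	/* R0 = atomic_cmpxchg(*(u32 *)(R1 + 0), R0, R2) */
	BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 0),
	/* R2 = atomic_xchg(*(u32 *)(R1 + 0), R2) */
	BPF_ATOMIC_OP(BPF_W, BPF_XCHG, BPF_REG_1, BPF_REG_2, 0),
	BPF_EXIT_INSN(),
  };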
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le)
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220610155552.25892-6-hbathini@linux.ibm.com
parent aea7ef8a
@@ -297,6 +297,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
 		u32 tmp_reg = bpf_to_ppc(TMP_REG);
 		u32 size = BPF_SIZE(code);
+		u32 save_reg, ret_reg;
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
 		bool func_addr_fixed;
@@ -799,6 +800,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		 * BPF_STX ATOMIC (atomic ops)
 		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
+			save_reg = _R0;
+			ret_reg = src_reg;
+
 			bpf_set_seen_register(ctx, tmp_reg);
 			bpf_set_seen_register(ctx, ax_reg);
@@ -829,6 +833,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			case BPF_XOR | BPF_FETCH:
 				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
 				break;
+			case BPF_CMPXCHG:
+				/*
+				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+				 * in src_reg for other cases.
+				 */
+				ret_reg = bpf_to_ppc(BPF_REG_0);
+
+				/* Compare with old value in BPF_REG_0 */
+				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
+				/* Don't set if different from old value */
+				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+				fallthrough;
+			case BPF_XCHG:
+				save_reg = src_reg;
+				break;
 			default:
 				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
@@ -836,15 +855,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			}
 			/* store new value */
-			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
+			EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
 			/* we're done if this succeeded */
 			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			/* For the BPF_FETCH variant, get old data into src_reg */
 			if (imm & BPF_FETCH) {
-				EMIT(PPC_RAW_MR(src_reg, ax_reg));
+				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
 				if (!fp->aux->verifier_zext)
-					EMIT(PPC_RAW_LI(src_reg_h, 0));
+					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
 			}
 			break;
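As a reading aid, a plain C model of the contract the emitted lwarx/stwcx.
sequence provides for a 32-bit BPF_CMPXCHG: 'old' plays the role of
BPF_REG_0, 'newval' the role of src_reg, and the return value is what the
JIT leaves in BPF_REG_0. This is a hypothetical sketch using a GCC atomic
builtin, not kernel code:

  #include <stdint.h>

  static uint32_t cmpxchg32_model(uint32_t *addr, uint32_t old, uint32_t newval)
  {
	uint32_t prev = old;

	/* Strong CAS: on mismatch the builtin writes the current contents
	 * into 'prev'; either way we return the previous value at *addr.
	 */
	__atomic_compare_exchange_n(addr, &prev, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return prev;
  }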