Commit fa6e23e2 authored by David S. Miller's avatar David S. Miller

Merge branch 'nfp-bpf-stack-support-in-offload'

Jakub Kicinski says:

====================
nfp: bpf: stack support in offload

This series brings stack support for offload.

We use the LMEM (Local memory) register file as memory to store
the stack.  Since this is a register file we need to do appropriate
shifts on unaligned accesses.  Verifier's state tracking helps us
with that.

LMEM can't be accessed directly, so we add support for setting
pointer registers through which one can read/write LMEM.

This set does not support accessing the stack when the alignment
is not known.  This can be added later (most likely using the byte_align
instructions).  There are also a number of optimizations which have been
   left out:
 - in more complex non aligned accesses, double shift and rotation
   can save us a cycle.  This, however, leads to code explosion
   since all access sizes have to be coded separately;
 - since setting LM pointers costs around 5 cycles, we should be
   tracking their values to make sure we don't move them when
   they're already set correctly for earlier access;
 - in case of 8 byte access aligned to 4 bytes and crossing
   32 byte boundary but not crossing a 64 byte boundary we don't
   have to increment the pointer, but this seems like a pretty
   rare case to justify the added complexity.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a5dd4982 9f16c8ab
......@@ -427,6 +427,48 @@ emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}
/* Encode a local-CSR (LCSR) instruction and push it onto the program. */
static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn = OP_LCSR_BASE;

	insn |= FIELD_PREP(OP_LCSR_A_SRC, areg);
	insn |= FIELD_PREP(OP_LCSR_B_SRC, breg);
	insn |= FIELD_PREP(OP_LCSR_WRITE, wr);
	insn |= FIELD_PREP(OP_LCSR_ADDR, addr);
	insn |= FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn);
	insn |= FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
/* Emit a write of @src to the local CSR at byte address @addr. */
static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* The LCSR instruction would take an immed for the unused operand,
	 * but our swreg machinery can't encode two immeds in one instruction.
	 * When @src is an immediate, encode it against reg_none() and then
	 * mirror the resulting encoding into both operand slots.
	 */
	if (swreg_type(src) != NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* __emit_lcsr() takes the CSR address in 4-byte units */
	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
		    false, reg.src_lmextn);
}
static void emit_nop(struct nfp_prog *nfp_prog)
{
__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
......@@ -494,6 +536,12 @@ static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
return tmp_reg;
}
/* Emit @count no-op instructions. */
static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		emit_nop(nfp_prog);
}
static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
enum br_special special)
......@@ -636,6 +684,246 @@ data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
return 0;
}
/* Callback used by mem_op_stack() to emit the code for one sub-word slice
 * of a stack (LMEM) access.
 * @gpr/@gpr_byte: data GPR and byte offset within it for this slice
 * @off:	byte offset of the slice within LMEM
 * @size:	number of bytes in this slice (at most 4)
 * @first/@last: whether this is the first/last slice of the whole access
 * @new_gpr:	true when this slice starts a new data GPR
 * @lm3:	address LMEM via LM pointer 3 rather than LM pointer 0
 * @needs_inc:	LM pointer auto-increment addressing is in use
 */
typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
/* Load one slice (up to 4 bytes) of a stack read from LMEM into data GPR
 * @dst at byte offset @dst_byte.  Implements the lmem_step signature and is
 * driven by mem_op_stack().  Returns 0 or -EOPNOTSUPP if the slice cannot
 * be encoded.
 */
static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	/* Auto-increment the LM pointer at most once per LMEM word, and
	 * never after the final slice.
	 */
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	/* A slice may not straddle a 4B boundary on either side */
	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;	/* LMEM word index */

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	/* Byte-enable mask selecting which bytes of @dst ld_field writes */
	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	/* Shift the source so its bytes line up with the destination bytes */
	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new GPR
		 * that means we are loading a second part of the LMEM word into
		 * a new GPR. IOW we've already read that LMEM word and
		 * therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
/* Store one slice (up to 4 bytes) of a stack write from data GPR @src
 * (byte offset @src_byte) into LMEM at @off.  Implements the lmem_step
 * signature and is driven by mem_op_stack().  Returns 0 or -EOPNOTSUPP
 * if the slice cannot be encoded.
 */
static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	/* Auto-increment the LM pointer at most once per LMEM word, and
	 * never after the final slice.
	 */
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	/* A slice may not straddle a 4B boundary on either side */
	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;	/* LMEM word index */

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	/* Byte-enable mask selecting which bytes of the word get written */
	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	/* Shift the source so its bytes line up with the destination bytes */
	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only first and last LMEM locations are going to need RMW,
		 * the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	/* Flush the staging register back to LMEM and/or advance the
	 * pointer once the current LMEM word is complete.
	 */
	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
/* Emit code for a stack (LMEM) access of @size bytes.
 * @ptr_off:	constant part of the pointer's offset from the stack base
 * @gpr:	first data GPR (destination for loads, source for stores)
 * @ptr_gpr:	GPR holding the pointer value (used when it's not constant)
 * @clr_gpr:	zero the upper data GPR for accesses smaller than 8 bytes
 * @step:	callback which emits each sub-word slice (load or store)
 *
 * Chooses the cheapest way of addressing LMEM: LMaddr0 for the bottom 64B,
 * otherwise LMaddr3 is set up, with auto-increment when the pointer is not
 * constant or the access crosses a 32B window.
 */
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;	/* sentinel - no GPR used yet */
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed in
		 * not added when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		/* Point LMaddr3 at the first word and auto-increment */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	/* Walk the access one slice at a time; a slice ends at the nearer of
	 * a data GPR boundary and an LMEM word boundary.
	 */
	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		/* With auto-increment only the offset within a word matters */
		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
......@@ -820,9 +1108,22 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
/* Handle a 64-bit register-to-register move.
 *
 * Fix: the diff scrape left the two pre-patch unconditional wrp_reg_mov()
 * calls fused into the body ahead of the if; they duplicated the else
 * branch and clobbered @dst before the R10 special case ran.  Removed.
 *
 * Reads of the frame pointer (R10) are materialized as stack base register
 * plus the program's stack depth (stack lives in LMEM); the upper half of
 * the 64-bit destination is zeroed.
 */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		/* dst = stack base + stack depth, upper 32 bits = 0 */
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}
......@@ -1083,19 +1384,28 @@ static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
/* Second half of the 2-instruction BPF_LD | BPF_IMM | BPF_DW handling.
 *
 * Fix: the diff scrape left the pre-patch body (a wrp_immed() of the high
 * word from the previous meta) fused in ahead of the declarations; it
 * duplicated the write of dst + 1 with the wrong value ordering.  Removed.
 *
 * Writes the low 32 bits from the previous instruction's immediate and the
 * high 32 bits from this one's.
 */
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}
/* First half of BPF_LD | BPF_IMM | BPF_DW - just arm the part2 callback.
 *
 * Fix: the diff scrape left the pre-patch lines (local @insn and the
 * wrp_immed() of the low word) fused into the body; the low word is now
 * written by imm_ld8_part2() together with the high word, so emitting it
 * here as well would be redundant.  Removed.
 */
static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;

	return 0;
}
......@@ -1132,6 +1442,15 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
meta->insn.src_reg * 2, 4);
}
static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size, unsigned int ptr_off)
{
return mem_op_stack(nfp_prog, meta, size, ptr_off,
meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
true, wrp_lmem_load);
}
static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 size)
{
......@@ -1215,6 +1534,10 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (meta->ptr.type == PTR_TO_PACKET)
return mem_ldx_data(nfp_prog, meta, size);
if (meta->ptr.type == PTR_TO_STACK)
return mem_ldx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
return -EOPNOTSUPP;
}
......@@ -1292,6 +1615,15 @@ mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
meta->insn.src_reg * 2, size);
}
static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size, unsigned int ptr_off)
{
return mem_op_stack(nfp_prog, meta, size, ptr_off,
meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
false, wrp_lmem_store);
}
static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
......@@ -1299,6 +1631,10 @@ mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (meta->ptr.type == PTR_TO_PACKET)
return mem_stx_data(nfp_prog, meta, size);
if (meta->ptr.type == PTR_TO_STACK)
return mem_stx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
return -EOPNOTSUPP;
}
......@@ -1799,7 +2135,7 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
static int nfp_translate(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
int i, err;
int err;
nfp_intro(nfp_prog);
if (nfp_prog->error)
......@@ -1831,8 +2167,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
if (nfp_prog->error)
return nfp_prog->error;
for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++)
emit_nop(nfp_prog);
wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
if (nfp_prog->error)
return nfp_prog->error;
......
......@@ -56,6 +56,7 @@ enum br_special {
/* GPR numbers reserved by the JIT for fixed program-wide roles (accessed
 * via the stack_reg/plen_reg/imm_a/imm_b helpers).  Bank A and Bank B
 * entries are distinct registers, so they may share a number.
 */
enum static_regs {
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};
......@@ -74,6 +75,8 @@ enum nfp_bpf_action_type {
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
#define stack_reg(np) reg_a(STATIC_REG_STACK)
#define stack_imm(np) imm_b(np)
#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np) pv_ctm_ptr(np)
#define imm_a(np) reg_a(STATIC_REG_IMM)
......@@ -98,6 +101,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
* @ptr: pointer type for memory operations
* @ptr_not_const: pointer is not always constant
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @skip: skip this instruction (optimized out)
......@@ -107,6 +111,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
struct nfp_insn_meta {
struct bpf_insn insn;
struct bpf_reg_state ptr;
bool ptr_not_const;
unsigned int off;
unsigned short n;
bool skip;
......@@ -151,6 +156,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
......@@ -171,6 +177,8 @@ struct nfp_prog {
unsigned int n_translated;
int error;
unsigned int stack_depth;
struct list_head insns;
};
......
......@@ -146,6 +146,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
{
unsigned int code_sz = max_instr * sizeof(u64);
enum nfp_bpf_action_type act;
unsigned int stack_size;
u16 start_off, done_off;
unsigned int max_mtu;
int ret;
......@@ -167,6 +168,13 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (cls_bpf->prog->aux->stack_depth > stack_size) {
nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
cls_bpf->prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
return -ENOMEM;
......
......@@ -111,19 +111,64 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
return 0;
}
/* Validate a stack pointer use against earlier uses of the same instruction
 * recorded in @meta.  The offload can handle a pointer whose value differs
 * between verifier paths only when all observed offsets share the same
 * alignment modulo 4 (the LMEM word size).  Returns 0 if the access is
 * supported, -EINVAL otherwise.
 */
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg)
{
	s32 old_off, new_off;

	/* The pointer must have a known constant value on this path */
	if (!tnum_is_const(reg->var_off)) {
		pr_info("variable ptr stack access\n");
		return -EINVAL;
	}

	/* First time we see this instruction - nothing to compare against */
	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	/* Sticky across paths: once the offset has varied, it stays marked */
	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	/* Varying offsets are OK while alignment within a word matches */
	if (old_off % 4 == new_off % 4)
		return 0;

	pr_info("stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}
/* Validate the pointer register used by a memory access instruction.
 *
 * Fix: the diff scrape fused the pre-patch and post-patch versions: both
 * parameter lines (u8 reg / u8 reg_no) were present - invalid C - along
 * with the stale two-type condition, the stale meta->ptr.type comparison
 * and the stale meta->ptr assignment.  Restored to the coherent new
 * version.
 *
 * Only CTX, STACK and PACKET pointers are supported, the type must not
 * change between verifier paths, and stack pointers get the additional
 * alignment/constancy checks.  The register state is cached in @meta for
 * use at translation time.
 */
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  const struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = &env->cur_state.regs[reg_no];
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_PACKET) {
		pr_info("unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
		if (err)
			return err;
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_info("ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}
......@@ -137,11 +182,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
priv->meta = meta;
if (meta->insn.src_reg == BPF_REG_10 ||
meta->insn.dst_reg == BPF_REG_10) {
pr_err("stack not yet supported\n");
return -EINVAL;
}
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
pr_err("program uses extended registers - jit hardening?\n");
......@@ -170,6 +210,8 @@ int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
struct nfp_bpf_analyzer_priv *priv;
int ret;
nfp_prog->stack_depth = prog->aux->stack_depth;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
......
......@@ -257,6 +257,11 @@ enum lcsr_wr_src {
#define OP_CARB_BASE 0x0e000000000ULL
#define OP_CARB_OR 0x00000010000ULL
#define NFP_CSR_ACT_LM_ADDR0 0x64
#define NFP_CSR_ACT_LM_ADDR1 0x6c
#define NFP_CSR_ACT_LM_ADDR2 0x94
#define NFP_CSR_ACT_LM_ADDR3 0x9c
/* Software register representation, independent of operand type */
#define NN_REG_TYPE GENMASK(31, 24)
#define NN_REG_LM_IDX GENMASK(23, 22)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment