Commit fa6e23e2 authored by David S. Miller

Merge branch 'nfp-bpf-stack-support-in-offload'

Jakub Kicinski says:

====================
nfp: bpf: stack support in offload

This series brings stack support for offload.

We use the LMEM (Local memory) register file as memory to store
the stack.  Since this is a register file we need to do appropriate
shifts on unaligned accesses.  Verifier's state tracking helps us
with that.

LMEM can't be accessed directly, so we add support for setting
pointer registers through which one can read/write LMEM.

This set does not support accessing the stack when the alignment
is not known.  This can be added later (most likely using the byte_align
instructions).  There are also a number of optimizations which have been
left out:
 - in more complex non-aligned accesses, a double shift and rotation
   can save us a cycle.  This, however, leads to code explosion
   since all access sizes have to be coded separately;
 - since setting LM pointers costs around 5 cycles, we should be
   tracking their values to make sure we don't move them when
   they're already set correctly for earlier access;
 - in case of 8 byte access aligned to 4 bytes and crossing
   32 byte boundary but not crossing a 64 byte boundary we don't
   have to increment the pointer, but this seems like a pretty
   rare case to justify the added complexity.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a5dd4982 9f16c8ab
...@@ -56,6 +56,7 @@ enum br_special { ...@@ -56,6 +56,7 @@ enum br_special {
enum static_regs { enum static_regs {
STATIC_REG_IMM = 21, /* Bank AB */ STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */
STATIC_REG_PKT_LEN = 22, /* Bank B */ STATIC_REG_PKT_LEN = 22, /* Bank B */
}; };
...@@ -74,6 +75,8 @@ enum nfp_bpf_action_type { ...@@ -74,6 +75,8 @@ enum nfp_bpf_action_type {
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) #define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) #define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
#define stack_reg(np) reg_a(STATIC_REG_STACK)
#define stack_imm(np) imm_b(np)
#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN) #define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np) pv_ctm_ptr(np) #define pptr_reg(np) pv_ctm_ptr(np)
#define imm_a(np) reg_a(STATIC_REG_IMM) #define imm_a(np) reg_a(STATIC_REG_IMM)
...@@ -98,6 +101,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); ...@@ -98,6 +101,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
* struct nfp_insn_meta - BPF instruction wrapper * struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction * @insn: BPF instruction
* @ptr: pointer type for memory operations * @ptr: pointer type for memory operations
* @ptr_not_const: pointer is not always constant
* @off: index of first generated machine instruction (in nfp_prog.prog) * @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number * @n: eBPF instruction number
* @skip: skip this instruction (optimized out) * @skip: skip this instruction (optimized out)
...@@ -107,6 +111,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); ...@@ -107,6 +111,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
struct nfp_insn_meta { struct nfp_insn_meta {
struct bpf_insn insn; struct bpf_insn insn;
struct bpf_reg_state ptr; struct bpf_reg_state ptr;
bool ptr_not_const;
unsigned int off; unsigned int off;
unsigned short n; unsigned short n;
bool skip; bool skip;
...@@ -151,6 +156,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) ...@@ -151,6 +156,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @tgt_done: jump target to get the next packet * @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors) * @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong * @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta) * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/ */
struct nfp_prog { struct nfp_prog {
...@@ -171,6 +177,8 @@ struct nfp_prog { ...@@ -171,6 +177,8 @@ struct nfp_prog {
unsigned int n_translated; unsigned int n_translated;
int error; int error;
unsigned int stack_depth;
struct list_head insns; struct list_head insns;
}; };
......
...@@ -146,6 +146,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, ...@@ -146,6 +146,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
{ {
unsigned int code_sz = max_instr * sizeof(u64); unsigned int code_sz = max_instr * sizeof(u64);
enum nfp_bpf_action_type act; enum nfp_bpf_action_type act;
unsigned int stack_size;
u16 start_off, done_off; u16 start_off, done_off;
unsigned int max_mtu; unsigned int max_mtu;
int ret; int ret;
...@@ -167,6 +168,13 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, ...@@ -167,6 +168,13 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (cls_bpf->prog->aux->stack_depth > stack_size) {
nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
cls_bpf->prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL); *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code) if (!*code)
return -ENOMEM; return -ENOMEM;
......
...@@ -111,19 +111,64 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, ...@@ -111,19 +111,64 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
return 0; return 0;
} }
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta,
const struct bpf_reg_state *reg)
{
s32 old_off, new_off;
if (!tnum_is_const(reg->var_off)) {
pr_info("variable ptr stack access\n");
return -EINVAL;
}
if (meta->ptr.type == NOT_INIT)
return 0;
old_off = meta->ptr.off + meta->ptr.var_off.value;
new_off = reg->off + reg->var_off.value;
meta->ptr_not_const |= old_off != new_off;
if (!meta->ptr_not_const)
return 0;
if (old_off % 4 == new_off % 4)
return 0;
pr_info("stack access changed location was:%d is:%d\n",
old_off, new_off);
return -EINVAL;
}
static int static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_verifier_env *env, u8 reg) const struct bpf_verifier_env *env, u8 reg_no)
{ {
if (env->cur_state.regs[reg].type != PTR_TO_CTX && const struct bpf_reg_state *reg = &env->cur_state.regs[reg_no];
env->cur_state.regs[reg].type != PTR_TO_PACKET) int err;
if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK &&
reg->type != PTR_TO_PACKET) {
pr_info("unsupported ptr type: %d\n", reg->type);
return -EINVAL; return -EINVAL;
}
if (meta->ptr.type != NOT_INIT && if (reg->type == PTR_TO_STACK) {
meta->ptr.type != env->cur_state.regs[reg].type) err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
if (err)
return err;
}
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
pr_info("ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type);
return -EINVAL; return -EINVAL;
}
meta->ptr = env->cur_state.regs[reg]; meta->ptr = *reg;
return 0; return 0;
} }
...@@ -137,11 +182,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) ...@@ -137,11 +182,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len); meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
priv->meta = meta; priv->meta = meta;
if (meta->insn.src_reg == BPF_REG_10 ||
meta->insn.dst_reg == BPF_REG_10) {
pr_err("stack not yet supported\n");
return -EINVAL;
}
if (meta->insn.src_reg >= MAX_BPF_REG || if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) { meta->insn.dst_reg >= MAX_BPF_REG) {
pr_err("program uses extended registers - jit hardening?\n"); pr_err("program uses extended registers - jit hardening?\n");
...@@ -170,6 +210,8 @@ int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog) ...@@ -170,6 +210,8 @@ int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
struct nfp_bpf_analyzer_priv *priv; struct nfp_bpf_analyzer_priv *priv;
int ret; int ret;
nfp_prog->stack_depth = prog->aux->stack_depth;
priv = kzalloc(sizeof(*priv), GFP_KERNEL); priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) if (!priv)
return -ENOMEM; return -ENOMEM;
......
...@@ -257,6 +257,11 @@ enum lcsr_wr_src { ...@@ -257,6 +257,11 @@ enum lcsr_wr_src {
#define OP_CARB_BASE 0x0e000000000ULL #define OP_CARB_BASE 0x0e000000000ULL
#define OP_CARB_OR 0x00000010000ULL #define OP_CARB_OR 0x00000010000ULL
#define NFP_CSR_ACT_LM_ADDR0 0x64
#define NFP_CSR_ACT_LM_ADDR1 0x6c
#define NFP_CSR_ACT_LM_ADDR2 0x94
#define NFP_CSR_ACT_LM_ADDR3 0x9c
/* Software register representation, independent of operand type */ /* Software register representation, independent of operand type */
#define NN_REG_TYPE GENMASK(31, 24) #define NN_REG_TYPE GENMASK(31, 24)
#define NN_REG_LM_IDX GENMASK(23, 22) #define NN_REG_LM_IDX GENMASK(23, 22)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment