Commit 22527437 authored by Alexei Starovoitov

Merge branch 'nfp-bpf-updates'

Jakub Kicinski says:

====================
This set adds support for update and delete calls from the datapath,
as well as XADD instructions (32 and 64 bit) and pseudo random numbers.
The XADD support depends on the verifier enforcing alignment, which
Daniel recently added.  XADD uses the NFP's atomic engine, which
requires values to be in big endian; we therefore need to keep track of
which parts of the values are used as atomics and byte swap them
accordingly.  Pseudo random numbers are generated using the NFP's HW
pseudo random number generator.
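
For illustration, a minimal XDP program of the kind this set enables on
the NFP could look as below.  This is only a sketch: the map layout, the
names and the bpf_helpers.h path are illustrative and not part of this
series.

/* Sketch only: exercises datapath map update, 64-bit XADD
 * (__sync_fetch_and_add) and bpf_get_prandom_u32().
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), helper wrappers, struct bpf_map_def */

struct bpf_map_def SEC("maps") cnt_map = {
	.type		= BPF_MAP_TYPE_HASH,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u64),
	.max_entries	= 256,
};

SEC("xdp")
int xdp_cnt(struct xdp_md *ctx)
{
	__u32 key = bpf_get_prandom_u32() & 0xff;	/* NFP HW PRNG once offloaded */
	__u64 one = 1, *val;

	val = bpf_map_lookup_elem(&cnt_map, &key);
	if (!val) {
		/* update (and delete) helpers become callable from the datapath */
		bpf_map_update_elem(&cnt_map, &key, &one, BPF_ANY);
		return XDP_PASS;
	}
	__sync_fetch_and_add(val, 1);	/* BPF_XADD, handled by the atomic engine */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";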

Jiong tackles the initial implementation of a packet cache, which he
describes as follows:

Memory reads on the NFP first fetch data from memory into transfer-in
registers, then move it from transfer-in to general registers.

Given the NFP is rich in transfer-in registers, they can serve as a
memory cache.

This patch identifies sequences of packet data reads (BPF_LDX) that are
executed sequentially.  The total access range of each sequence is then
calculated and attached to every read instruction in it, and the first
instruction of the sequence is marked with a cache init flag so that
executing it brings in the whole range of packet data for the sequence.

All later packet reads in the sequence then fetch data directly from the
transfer-in registers, so no NFP memory access needs to be JITed for them.
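
As a concrete illustration (a sketch, not code from this set), a plain
Ethernet header parse is the kind of back-to-back load sequence the
cache targets; the three constant-offset reads below can be covered by a
single fetch:

/* Sketch only: reads at offsets 0, 6 and 12.  The first load can
 * initialize a cache covering the accessed range, the following ones
 * can then be served from transfer-in registers.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"

SEC("xdp")
int pkt_cache_demo(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	__u16 proto;
	__u8 d0, s0;

	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	d0 = eth->h_dest[0];	/* BPF_LDX, offset 0 */
	s0 = eth->h_source[0];	/* BPF_LDX, offset 6 */
	proto = eth->h_proto;	/* BPF_LDX, offset 12 */

	return (d0 & 1) || (s0 & 1) || !proto ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";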

Function calls, non-packet-data memory reads, packet writes and memcpy
invalidate the cache and start a new cache range.

Cache invalidation could be improved in the future; for example, a packet
write doesn't need to invalidate the cache if the write destination won't
be read again.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents f6ef5658 7c095f5d
@@ -218,17 +218,17 @@ nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
return skb;
hdr = (struct cmsg_hdr *)skb->data;
/* 0 reply_size means caller will do the validation */
if (reply_size && skb->len != reply_size) {
cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
skb->len, reply_size);
goto err_free;
}
if (hdr->type != __CMSG_REPLY(type)) {
cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
hdr->type, __CMSG_REPLY(type));
goto err_free;
}
/* 0 reply_size means caller will do the validation */
if (reply_size && skb->len != reply_size) {
cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
type, skb->len, reply_size);
goto err_free;
}
return skb;
err_free:
@@ -41,6 +41,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_FUNC = 1,
NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
NFP_BPF_CAP_TYPE_MAPS = 3,
NFP_BPF_CAP_TYPE_RANDOM = 4,
};
struct nfp_bpf_cap_tlv_func {
@@ -284,6 +284,12 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
case BPF_FUNC_map_lookup_elem:
bpf->helpers.map_lookup = readl(&cap->func_addr);
break;
case BPF_FUNC_map_update_elem:
bpf->helpers.map_update = readl(&cap->func_addr);
break;
case BPF_FUNC_map_delete_elem:
bpf->helpers.map_delete = readl(&cap->func_addr);
break;
}
return 0;
@@ -309,6 +315,14 @@ nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
return 0;
}
static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
u32 length)
{
bpf->pseudo_random = true;
return 0;
}
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -347,6 +361,10 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
if (nfp_bpf_parse_cap_maps(app->priv, value, length))
goto err_release_free;
break;
case NFP_BPF_CAP_TYPE_RANDOM:
if (nfp_bpf_parse_cap_random(app->priv, value, length))
goto err_release_free;
break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -72,6 +72,7 @@ enum nfp_relo_type {
#define BR_OFF_RELO 15000
enum static_regs {
STATIC_REG_IMMA = 20, /* Bank AB */
STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */
STATIC_REG_PKT_LEN = 22, /* Bank B */
@@ -91,6 +92,8 @@ enum pkt_vec {
#define pptr_reg(np) pv_ctm_ptr(np)
#define imm_a(np) reg_a(STATIC_REG_IMM)
#define imm_b(np) reg_b(STATIC_REG_IMM)
#define imma_a(np) reg_a(STATIC_REG_IMMA)
#define imma_b(np) reg_b(STATIC_REG_IMMA)
#define imm_both(np) reg_both(STATIC_REG_IMM)
#define NFP_BPF_ABI_FLAGS reg_imm(0)
@@ -128,6 +131,10 @@ enum pkt_vec {
*
* @helpers: helper addressess for various calls
* @helpers.map_lookup: map lookup helper address
* @helpers.map_update: map update helper address
* @helpers.map_delete: map delete helper address
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
*/
struct nfp_app_bpf {
struct nfp_app *app;
@@ -162,7 +169,18 @@ struct nfp_app_bpf {
struct {
u32 map_lookup;
u32 map_update;
u32 map_delete;
} helpers;
bool pseudo_random;
};
enum nfp_bpf_map_use {
NFP_MAP_UNUSED = 0,
NFP_MAP_USE_READ,
NFP_MAP_USE_WRITE,
NFP_MAP_USE_ATOMIC_CNT,
};
/**
@@ -171,12 +189,14 @@ struct nfp_app_bpf {
* @bpf: back pointer to bpf app private structure
* @tid: table id identifying map on datapath
* @l: link on the nfp_app_bpf->map_list list
* @use_map: map of how the value is used (in 4B chunks)
*/
struct nfp_bpf_map {
struct bpf_offloaded_map *offmap;
struct nfp_app_bpf *bpf;
u32 tid;
struct list_head l;
enum nfp_bpf_map_use use_map[];
};
struct nfp_prog;
@@ -190,6 +210,16 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
#define nfp_meta_next(meta) list_next_entry(meta, l)
#define nfp_meta_prev(meta) list_prev_entry(meta, l)
/**
* struct nfp_bpf_reg_state - register state for calls
* @reg: BPF register state from latest path
* @var_off: for stack arg - changes stack offset on different paths
*/
struct nfp_bpf_reg_state {
struct bpf_reg_state reg;
bool var_off;
};
#define FLAG_INSN_IS_JUMP_DST BIT(0)
/**
@@ -199,11 +229,16 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
* @ldst_gather_len: memcpy length gathered from load/store sequence
* @paired_st: the paired store insn at the head of the sequence
* @ptr_not_const: pointer is not always constant
* @pkt_cache: packet data cache information
* @pkt_cache.range_start: start offset for associated packet data cache
* @pkt_cache.range_end: end offset for associated packet data cache
* @pkt_cache.do_init: this read needs to initialize packet data cache
* @xadd_over_16bit: 16bit immediate is not guaranteed
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
* @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -214,18 +249,27 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
struct nfp_insn_meta {
struct bpf_insn insn;
union {
/* pointer ops (ld/st/xadd) */
struct {
struct bpf_reg_state ptr;
struct bpf_insn *paired_st;
s16 ldst_gather_len;
bool ptr_not_const;
struct {
s16 range_start;
s16 range_end;
bool do_init;
} pkt_cache;
bool xadd_over_16bit;
bool xadd_maybe_16bit;
};
/* jump */
struct nfp_insn_meta *jmp_dst;
/* function calls */
struct {
u32 func_id;
struct bpf_reg_state arg1;
struct bpf_reg_state arg2;
bool arg2_var_off;
struct nfp_bpf_reg_state arg2;
};
};
unsigned int off;
@@ -269,6 +313,41 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}
static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}
static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
u8 code = meta->insn.code;
return BPF_CLASS(code) == BPF_LD &&
(BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}
static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
u8 code = meta->insn.code;
return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}
static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
/**
* struct nfp_prog - nfp BPF program
* @bpf: backpointer to the bpf app priv structure
@@ -164,6 +164,41 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
return 0;
}
/* Atomic engine requires values to be in big endian, we need to byte swap
* the value words used with xadd.
*/
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
u32 *word = value;
unsigned int i;
for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
word[i] = (__force u32)cpu_to_be32(word[i]);
}
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
{
int err;
err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
if (err)
return err;
nfp_map_bpf_byte_swap(offmap->dev_priv, value);
return 0;
}
static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
nfp_map_bpf_byte_swap(offmap->dev_priv, value);
return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
@@ -183,8 +218,8 @@ nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
.map_get_next_key = nfp_bpf_map_get_next_key,
.map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
.map_update_elem = nfp_bpf_ctrl_update_entry,
.map_lookup_elem = nfp_bpf_map_lookup_entry,
.map_update_elem = nfp_bpf_map_update_entry,
.map_delete_elem = nfp_bpf_map_delete_elem,
};
@@ -192,6 +227,7 @@ static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
struct nfp_bpf_map *nfp_map;
unsigned int use_map_size;
long long int res;
if (!bpf->maps.types)
@@ -226,7 +262,10 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
return -ENOMEM;
}
nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);
nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
if (!nfp_map)
return -ENOMEM;
@@ -97,7 +97,7 @@ nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
if (nfp_prog->adjust_head_location != meta->n)
goto exit_set_location;
if (meta->arg2.var_off.value != imm)
if (meta->arg2.reg.var_off.value != imm)
goto exit_set_location;
}
@@ -106,15 +106,70 @@ nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
nfp_prog->adjust_head_location = location;
}
static int
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
struct nfp_bpf_reg_state *old_arg)
{
s64 off, old_off;
if (reg->type != PTR_TO_STACK) {
pr_vlog(env, "%s: unsupported ptr type %d\n",
fname, reg->type);
return false;
}
if (!tnum_is_const(reg->var_off)) {
pr_vlog(env, "%s: variable pointer\n", fname);
return false;
}
off = reg->var_off.value + reg->off;
if (-off % 4) {
pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
return false;
}
/* Rest of the checks is only if we re-parse the same insn */
if (!old_arg)
return true;
old_off = old_arg->reg.var_off.value + old_arg->reg.off;
old_arg->var_off |= off != old_off;
return true;
}
static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
struct nfp_insn_meta *meta,
u32 helper_tgt, const struct bpf_reg_state *reg1)
{
if (!helper_tgt) {
pr_vlog(env, "%s: not supported by FW\n", fname);
return false;
}
/* Rest of the checks is only if we re-parse the same insn */
if (!meta->func_id)
return true;
if (meta->arg1.map_ptr != reg1->map_ptr) {
pr_vlog(env, "%s: called for different map\n", fname);
return false;
}
return true;
}
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
struct nfp_insn_meta *meta)
{
const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
struct nfp_app_bpf *bpf = nfp_prog->bpf;
u32 func_id = meta->insn.imm;
s64 off, old_off;
switch (func_id) {
case BPF_FUNC_xdp_adjust_head:
@@ -131,41 +186,36 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
break;
case BPF_FUNC_map_lookup_elem:
if (!bpf->helpers.map_lookup) {
pr_vlog(env, "map_lookup: not supported by FW\n");
if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
bpf->helpers.map_lookup, reg1) ||
!nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
meta->func_id ? &meta->arg2 : NULL))
return -EOPNOTSUPP;
}
if (reg2->type != PTR_TO_STACK) {
pr_vlog(env,
"map_lookup: unsupported key ptr type %d\n",
reg2->type);
return -EOPNOTSUPP;
}
if (!tnum_is_const(reg2->var_off)) {
pr_vlog(env, "map_lookup: variable key pointer\n");
break;
case BPF_FUNC_map_update_elem:
if (!nfp_bpf_map_call_ok("map_update", env, meta,
bpf->helpers.map_update, reg1) ||
!nfp_bpf_stack_arg_ok("map_update", env, reg2,
meta->func_id ? &meta->arg2 : NULL) ||
!nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
return -EOPNOTSUPP;
}
break;
off = reg2->var_off.value + reg2->off;
if (-off % 4) {
pr_vlog(env,
"map_lookup: unaligned stack pointer %lld\n",
-off);
case BPF_FUNC_map_delete_elem:
if (!nfp_bpf_map_call_ok("map_delete", env, meta,
bpf->helpers.map_delete, reg1) ||
!nfp_bpf_stack_arg_ok("map_delete", env, reg2,
meta->func_id ? &meta->arg2 : NULL))
return -EOPNOTSUPP;
}
break;
/* Rest of the checks is only if we re-parse the same insn */
if (!meta->func_id)
case BPF_FUNC_get_prandom_u32:
if (bpf->pseudo_random)
break;
pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
return -EOPNOTSUPP;
old_off = meta->arg2.var_off.value + meta->arg2.off;
meta->arg2_var_off |= off != old_off;
if (meta->arg1.map_ptr != reg1->map_ptr) {
pr_vlog(env, "map_lookup: called for different map\n");
return -EOPNOTSUPP;
}
break;
default:
pr_vlog(env, "unsupported function id: %d\n", func_id);
return -EOPNOTSUPP;
@@ -173,7 +223,7 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
meta->func_id = func_id;
meta->arg1 = *reg1;
meta->arg2 = *reg2;
meta->arg2.reg = *reg2;
return 0;
}
@@ -242,6 +292,72 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
return -EINVAL;
}
static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
static const char * const names[] = {
[NFP_MAP_UNUSED] = "unused",
[NFP_MAP_USE_READ] = "read",
[NFP_MAP_USE_WRITE] = "write",
[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
};
if (use >= ARRAY_SIZE(names) || !names[use])
return "unknown";
return names[use];
}
static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
struct nfp_bpf_map *nfp_map,
unsigned int off, enum nfp_bpf_map_use use)
{
if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
nfp_map->use_map[off / 4] != use) {
pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
nfp_bpf_map_use_name(use), off);
return -EOPNOTSUPP;
}
nfp_map->use_map[off / 4] = use;
return 0;
}
static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
const struct bpf_reg_state *reg,
enum nfp_bpf_map_use use)
{
struct bpf_offloaded_map *offmap;
struct nfp_bpf_map *nfp_map;
unsigned int size, off;
int i, err;
if (!tnum_is_const(reg->var_off)) {
pr_vlog(env, "map value offset is variable\n");
return -EOPNOTSUPP;
}
off = reg->var_off.value + meta->insn.off + reg->off;
size = BPF_LDST_BYTES(&meta->insn);
offmap = map_to_offmap(reg->map_ptr);
nfp_map = offmap->dev_priv;
if (off + size > offmap->map.value_size) {
pr_vlog(env, "map value access out-of-bounds\n");
return -EINVAL;
}
for (i = 0; i < size; i += 4 - (off + i) % 4) {
err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
if (err)
return err;
}
return 0;
}
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
struct bpf_verifier_env *env, u8 reg_no)
@@ -264,10 +380,22 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
if (reg->type == PTR_TO_MAP_VALUE) {
if (is_mbpf_load(meta)) {
err = nfp_bpf_map_mark_used(env, meta, reg,
NFP_MAP_USE_READ);
if (err)
return err;
}
if (is_mbpf_store(meta)) {
pr_vlog(env, "map writes not supported\n");
return -EOPNOTSUPP;
}
if (is_mbpf_xadd(meta)) {
err = nfp_bpf_map_mark_used(env, meta, reg,
NFP_MAP_USE_ATOMIC_CNT);
if (err)
return err;
}
}
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
@@ -281,6 +409,31 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
struct bpf_verifier_env *env)
{
const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
if (dreg->type != PTR_TO_MAP_VALUE) {
pr_vlog(env, "atomic add not to a map value pointer: %d\n",
dreg->type);
return -EOPNOTSUPP;
}
if (sreg->type != SCALAR_VALUE) {
pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
return -EOPNOTSUPP;
}
meta->xadd_over_16bit |=
sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
meta->xadd_maybe_16bit |=
(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;
return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
@@ -313,6 +466,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
if (is_mbpf_store(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
return 0;
}
@@ -48,6 +48,8 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ32_SWAP] = { 0x02, 0x5c },
[CMD_TGT_READ_LE] = { 0x01, 0x40 },
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
[CMD_TGT_ADD] = { 0x00, 0x47 },
[CMD_TGT_ADD_IMM] = { 0x02, 0x47 },
};
static bool unreg_is_imm(u16 reg)
@@ -39,6 +39,7 @@
#include <linux/types.h>
#define REG_NONE 0
#define REG_WIDTH 4
#define RE_REG_NO_DST 0x020
#define RE_REG_IMM 0x020
@@ -237,6 +238,8 @@ enum cmd_tgt_map {
CMD_TGT_READ32_SWAP,
CMD_TGT_READ_LE,
CMD_TGT_READ_SWAP_LE,
CMD_TGT_ADD,
CMD_TGT_ADD_IMM,
__CMD_TGT_MAP_SIZE,
};
@@ -250,9 +253,12 @@ enum cmd_mode {
enum cmd_ctx_swap {
CMD_CTX_SWAP = 0,
CMD_CTX_SWAP_DEFER1 = 1,
CMD_CTX_SWAP_DEFER2 = 2,
CMD_CTX_NO_SWAP = 3,
};
#define CMD_OVE_DATA GENMASK(5, 3)
#define CMD_OVE_LEN BIT(7)
#define CMD_OV_LEN GENMASK(12, 8)
@@ -278,6 +284,7 @@ enum lcsr_wr_src {
#define NFP_CSR_ACT_LM_ADDR1 0x6c
#define NFP_CSR_ACT_LM_ADDR2 0x94
#define NFP_CSR_ACT_LM_ADDR3 0x9c
#define NFP_CSR_PSEUDO_RND_NUM 0x148
/* Software register representation, independent of operand type */
#define NN_REG_TYPE GENMASK(31, 24)
@@ -372,7 +372,7 @@ struct xdp_rxq_info;
#define BPF_LDST_BYTES(insn) \
({ \
const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
WARN_ON(__size < 0); \
__size; \
})