Commit afb90dbb authored by Ioana Radulescu, committed by David S. Miller

dpaa2-eth: Add ethtool support for flow classification

Add support for inserting and deleting Rx flow classification
rules through ethtool.

We support classification based on a subset of header fields for the
ether, ip4, tcp4, udp4 and sctp4 flow types.

Rx queues are core affine, so the action argument effectively
selects on which cpu the matching frame will be processed.
Discarding the frame is also supported.
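As an illustration (not part of this patch), such a rule could be inserted from
userspace either with an ethtool command of the form
"ethtool -N <iface> flow-type tcp4 dst-port 80 action 2 loc 0", or
programmatically through the ETHTOOL_SRXCLSRLINS ioctl. The sketch below is a
minimal, hypothetical example of the latter; the interface name, port number,
queue index and rule location are arbitrary values chosen for the example.
Since Rx queues are core affine on this device, the action value also selects
the CPU that will process matching frames.

/* Hypothetical userspace sketch: steer TCPv4 frames with destination
 * port 80 to Rx queue 2, using flow classification table location 0.
 * RX_CLS_FLOW_DISC could be used as ring_cookie to drop matches instead.
 */
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int insert_tcp4_rule(const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, err;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);     /* match dst port 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff); /* full port mask */
	nfc.fs.ring_cookie = 2;	/* deliver to Rx queue 2 */
	nfc.fs.location = 0;	/* rule index in the classification table */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return err;
}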
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4aaaf9b9
@@ -1897,6 +1897,11 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
if (err)
goto close;
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
goto close;
return 0;
close:
@@ -2011,6 +2016,18 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_DA,
.size = 6,
}, {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_SA,
.size = 6,
}, {
/* This is the last ethertype field parsed:
* depending on frame format, it can be the MAC ethertype
* or the VLAN etype.
*/
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_TYPE,
.size = 2,
}, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
@@ -2109,6 +2126,33 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
return err;
}
/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(void)
{
int i, size = 0;
for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
size += dist_fields[i].size;
return size;
}
/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
int i, off = 0;
for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
if (dist_fields[i].cls_prot == prot &&
dist_fields[i].cls_field == field)
return off;
off += dist_fields[i].size;
}
WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
return 0;
}
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
...
@@ -297,6 +297,11 @@ struct dpaa2_eth_dist_fields {
int size;
};
struct dpaa2_eth_cls_rule {
struct ethtool_rx_flow_spec fs;
u8 in_use;
};
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -340,6 +345,7 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
};
@@ -378,6 +384,9 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
DPNI_RX_DIST_KEY_VER_MINOR) < 0)
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_HASH,
DPAA2_ETH_RX_DIST_CLS
@@ -426,5 +435,7 @@ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_cls_key_size(void);
int dpaa2_eth_cls_fld_off(int prot, int field);
#endif /* __DPAA2_H */
@@ -224,10 +224,310 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = cdan;
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
void *key, void *mask)
{
int off;
if (eth_mask->h_proto) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = eth_value->h_proto;
*(__be16 *)(mask + off) = eth_mask->h_proto;
}
if (!is_zero_ether_addr(eth_mask->h_source)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
ether_addr_copy(key + off, eth_value->h_source);
ether_addr_copy(mask + off, eth_mask->h_source);
}
if (!is_zero_ether_addr(eth_mask->h_dest)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, eth_value->h_dest);
ether_addr_copy(mask + off, eth_mask->h_dest);
}
return 0;
}
static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
struct ethtool_usrip4_spec *uip_mask,
void *key, void *mask)
{
int off;
u32 tmp_value, tmp_mask;
if (uip_mask->tos || uip_mask->ip_ver)
return -EOPNOTSUPP;
if (uip_mask->ip4src) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = uip_value->ip4src;
*(__be32 *)(mask + off) = uip_mask->ip4src;
}
if (uip_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = uip_value->ip4dst;
*(__be32 *)(mask + off) = uip_mask->ip4dst;
}
if (uip_mask->proto) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = uip_value->proto;
*(u8 *)(mask + off) = uip_mask->proto;
}
if (uip_mask->l4_4_bytes) {
tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = htons(tmp_value >> 16);
*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
}
/* Only apply the rule for IPv4 frames */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
return 0;
}
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
struct ethtool_tcpip4_spec *l4_mask,
void *key, void *mask, u8 l4_proto)
{
int off;
if (l4_mask->tos)
return -EOPNOTSUPP;
if (l4_mask->ip4src) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = l4_value->ip4src;
*(__be32 *)(mask + off) = l4_mask->ip4src;
}
if (l4_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = l4_value->ip4dst;
*(__be32 *)(mask + off) = l4_mask->ip4dst;
}
if (l4_mask->psrc) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = l4_value->psrc;
*(__be16 *)(mask + off) = l4_mask->psrc;
}
if (l4_mask->pdst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = l4_value->pdst;
*(__be16 *)(mask + off) = l4_mask->pdst;
}
/* Only apply the rule for IPv4 frames with the specified L4 proto */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = l4_proto;
*(u8 *)(mask + off) = 0xFF;
return 0;
}
static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
void *key, void *mask)
{
int off;
if (ext_mask->vlan_etype)
return -EOPNOTSUPP;
if (ext_mask->vlan_tci) {
off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
*(__be16 *)(key + off) = ext_value->vlan_tci;
*(__be16 *)(mask + off) = ext_mask->vlan_tci;
}
return 0;
}
static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
void *key, void *mask)
{
int off;
if (!is_zero_ether_addr(ext_mask->h_dest)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, ext_value->h_dest);
ether_addr_copy(mask + off, ext_mask->h_dest);
}
return 0;
}
static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
key, mask);
break;
case IP_USER_FLOW:
err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
&fs->m_u.usr_ip4_spec, key, mask);
break;
case TCP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
key, mask, IPPROTO_TCP);
break;
case UDP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
key, mask, IPPROTO_UDP);
break;
case SCTP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
&fs->m_u.sctp_ip4_spec, key, mask,
IPPROTO_SCTP);
break;
default:
return -EOPNOTSUPP;
}
if (err)
return err;
if (fs->flow_type & FLOW_EXT) {
err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
if (err)
return err;
}
return 0;
}
static int do_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *fs,
bool add)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
struct dpni_rule_cfg rule_cfg = { 0 };
struct dpni_fs_action_cfg fs_act = { 0 };
dma_addr_t key_iova;
void *key_buf;
int err;
if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
return -EINVAL;
rule_cfg.key_size = dpaa2_eth_cls_key_size();
/* allocate twice the key size, for the actual key and for mask */
key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
if (!key_buf)
return -ENOMEM;
/* Fill the key and mask memory areas */
err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
if (err)
goto free_mem;
key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) {
err = -ENOMEM;
goto free_mem;
}
rule_cfg.key_iova = key_iova;
rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
if (add) {
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
fs_act.options |= DPNI_FS_OPT_DISCARD;
else
fs_act.flow_id = fs->ring_cookie;
err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
fs->location, &rule_cfg, &fs_act);
} else {
err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
&rule_cfg);
}
dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
free_mem:
kfree(key_buf);
return err;
}
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;
int err = -EINVAL;
if (!priv->rx_cls_enabled)
return -EOPNOTSUPP;
if (location >= dpaa2_eth_fs_count(priv))
return -EINVAL;
rule = &priv->cls_rules[location];
/* If a rule is present at the specified location, delete it. */
if (rule->in_use) {
err = do_cls_rule(net_dev, &rule->fs, false);
if (err)
return err;
rule->in_use = 0;
}
/* If no new entry to add, return here */
if (!new_fs)
return err;
err = do_cls_rule(net_dev, new_fs, true);
if (err)
return err;
rule->in_use = 1;
rule->fs = *new_fs;
return 0;
}
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int max_rules = dpaa2_eth_fs_count(priv);
int i, j = 0;
switch (rxnfc->cmd) {
case ETHTOOL_GRXFH:
@@ -240,6 +540,31 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
for (i = 0; i < max_rules; i++)
if (priv->cls_rules[i].in_use)
rxnfc->rule_cnt++;
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
if (rxnfc->fs.location >= max_rules)
return -EINVAL;
if (!priv->cls_rules[rxnfc->fs.location].in_use)
return -EINVAL;
rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < max_rules; i++) {
if (!priv->cls_rules[i].in_use)
continue;
if (j == rxnfc->rule_cnt)
return -EMSGSIZE;
rule_locs[j++] = i;
}
rxnfc->rule_cnt = j;
rxnfc->data = max_rules;
break;
default:
return -EOPNOTSUPP;
}
@@ -258,6 +583,12 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return -EOPNOTSUPP;
err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
break;
case ETHTOOL_SRXCLSRLINS:
err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
case ETHTOOL_SRXCLSRLDEL:
err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
break;
default:
err = -EOPNOTSUPP;
}
...
@@ -539,4 +539,31 @@ struct dpni_cmd_set_rx_hash_dist {
__le64 key_cfg_iova;
};
struct dpni_cmd_add_fs_entry {
/* cmd word 0 */
__le16 options;
u8 tc_id;
u8 key_size;
__le16 index;
__le16 flow_id;
/* cmd word 1 */
__le64 key_iova;
/* cmd word 2 */
__le64 mask_iova;
/* cmd word 3 */
__le64 flc;
};
struct dpni_cmd_remove_fs_entry {
/* cmd word 0 */
__le16 pad0;
u8 tc_id;
u8 key_size;
__le32 pad1;
/* cmd word 1 */
__le64 key_iova;
/* cmd word 2 */
__le64 mask_iova;
};
#endif /* _FSL_DPNI_CMD_H */
@@ -1672,3 +1672,81 @@ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
* (to select a flow ID)
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @tc_id: Traffic class selection (0-7)
* @index: Location in the FS table where to insert the entry.
* Only relevant if MASKING is enabled for FS
* classification on this DPNI, it is ignored for exact match.
* @cfg: Flow steering rule to add
* @action: Action to be taken as result of a classification hit
*
* Return: '0' on Success; Error code otherwise.
*/
int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 tc_id,
u16 index,
const struct dpni_rule_cfg *cfg,
const struct dpni_fs_action_cfg *action)
{
struct dpni_cmd_add_fs_entry *cmd_params;
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
cmd_params->tc_id = tc_id;
cmd_params->key_size = cfg->key_size;
cmd_params->index = cpu_to_le16(index);
cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
cmd_params->options = cpu_to_le16(action->options);
cmd_params->flow_id = cpu_to_le16(action->flow_id);
cmd_params->flc = cpu_to_le64(action->flc);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
* traffic class
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @tc_id: Traffic class selection (0-7)
* @cfg: Flow steering rule to remove
*
* Return: '0' on Success; Error code otherwise.
*/
int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 tc_id,
const struct dpni_rule_cfg *cfg)
{
struct dpni_cmd_remove_fs_entry *cmd_params;
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
cmd_params->tc_id = tc_id;
cmd_params->key_size = cfg->key_size;
cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
@@ -855,6 +855,64 @@ struct dpni_rule_cfg {
u8 key_size;
};
/**
* Discard matching traffic. If set, this takes precedence over any other
* configuration and matching traffic is always discarded.
*/
#define DPNI_FS_OPT_DISCARD 0x1
/**
* Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
* override the FLC value set per queue.
* For more details check the Frame Descriptor section in the hardware
* documentation.
*/
#define DPNI_FS_OPT_SET_FLC 0x2
/**
* Indicates whether the 6 lowest significant bits of FLC are used for stash
* control. If set, the 6 least significant bits in value are interpreted as
* follows:
* - bits 0-1: indicates the number of 64 byte units of context that are
* stashed. FLC value is interpreted as a memory address in this case,
* excluding the 6 LS bits.
* - bits 2-3: indicates the number of 64 byte units of frame annotation
* to be stashed. Annotation is placed at FD[ADDR].
* - bits 4-5: indicates the number of 64 byte units of frame data to be
* stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
* This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
*/
#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
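For illustration only (not part of this patch), and assuming the bit layout
documented in the comment above, an FLC value carrying stash control hints
might be composed as in the following hypothetical helper;
build_flc_with_stash() and its unit-count parameters are made up for the
example. A caller would then set both DPNI_FS_OPT_SET_FLC and
DPNI_FS_OPT_SET_STASH_CONTROL in dpni_fs_action_cfg.options and pass the
result in the flc field.

#include <linux/types.h>

/* Hypothetical sketch: pack stash control hints into the 6 least
 * significant bits of an FLC value. The context address is assumed to be
 * 64-byte aligned, so its low 6 bits are free to carry the hints.
 */
static u64 build_flc_with_stash(u64 ctx_addr, u8 ctx_units,
				u8 annot_units, u8 data_units)
{
	u64 flc = ctx_addr & ~0x3FULL;		/* address in bits 6 and up */

	flc |= (u64)(ctx_units & 0x3);		/* bits 0-1: context units */
	flc |= (u64)(annot_units & 0x3) << 2;	/* bits 2-3: annotation units */
	flc |= (u64)(data_units & 0x3) << 4;	/* bits 4-5: frame data units */

	return flc;
}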
/**
* struct dpni_fs_action_cfg - Action configuration for table look-up
* @flc: FLC value for traffic matching this rule. Please check the
* Frame Descriptor section in the hardware documentation for
* more information.
* @flow_id: Identifies the Rx queue used for matching traffic. Supported
* values are in range 0 to num_queue-1.
* @options: Any combination of DPNI_FS_OPT_ values.
*/
struct dpni_fs_action_cfg {
u64 flc;
u16 flow_id;
u16 options;
};
int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 tc_id,
u16 index,
const struct dpni_rule_cfg *cfg,
const struct dpni_fs_action_cfg *action);
int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 tc_id,
const struct dpni_rule_cfg *cfg);
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
...