Commit 42fac7bd authored by David S. Miller

Merge branch 'dpaa2-eth-Add-support-for-Rx-flow-classification'

Ioana Radulescu says:

====================
dpaa2-eth: Add support for Rx flow classification

The Management Complex (MC) firmware initially allowed the
configuration of a single key to be used both for Rx flow hashing
and flow classification. This prevented us from supporting
Rx flow classification independently of the hash key configuration.

Newer firmware versions expose separate commands for
configuring the two types of keys, so we can use them to
introduce Rx classification support. For frames that don't match
any classification rule, we fall back to statistical distribution
based on the current hash key.

The first patch in this set updates the Rx hashing code to use
the new firmware API for key config. Subsequent patches introduce
the firmware API for configuring the classification and actual
support for adding and deleting rules via ethtool.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f3edc2db afb90dbb
......@@ -1897,6 +1897,11 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
if (err)
goto close;
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
goto close;
return 0;
close:
......@@ -2004,13 +2009,25 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
}
/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_hash_fields hash_fields[] = {
static const struct dpaa2_eth_dist_fields dist_fields[] = {
{
/* L2 header */
.rxnfc_field = RXH_L2DA,
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_DA,
.size = 6,
}, {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_SA,
.size = 6,
}, {
/* This is the last ethertype field parsed:
* depending on frame format, it can be the MAC ethertype
* or the VLAN etype.
*/
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_TYPE,
.size = 2,
}, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
......@@ -2049,33 +2066,122 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = {
},
};
/* Set RX hash options
/* Configure the Rx hash key using the legacy API */
/* Program the Rx hash key via the legacy, per-traffic-class MC command.
 * Used on firmware versions that predate the dedicated hash/FS commands.
 */
static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg = {
		.dist_mode = DPNI_DIST_MODE_HASH,
		.dist_size = dpaa2_eth_queue_count(priv),
		.key_cfg_iova = key,
	};
	int err;

	/* Traffic class 0 is the only one configured */
	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_tc_dist failed\n");

	return err;
}
/* Program the Rx hash key via the new (post-legacy) firmware command */
static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg = {
		.enable = 1,
		.dist_size = dpaa2_eth_queue_count(priv),
		.key_cfg_iova = key,
	};
	int err;

	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_hash_dist failed\n");

	return err;
}
/* Program the Rx flow classification (flow steering) key */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg = {
		.enable = 1,
		.dist_size = dpaa2_eth_queue_count(priv),
		.key_cfg_iova = key,
	};
	int err;

	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_fs_dist failed\n");

	return err;
}
/* Total size in bytes of the Rx flow classification key: the sum of
 * all supported header field sizes in dist_fields[]
 */
int dpaa2_eth_cls_key_size(void)
{
	int total = 0;
	int i;

	for (i = ARRAY_SIZE(dist_fields) - 1; i >= 0; i--)
		total += dist_fields[i].size;

	return total;
}
/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
int i, off = 0;
for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
if (dist_fields[i].cls_prot == prot &&
dist_fields[i].cls_field == field)
return off;
off += dist_fields[i].size;
}
WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
return 0;
}
/* NOTE(review): this span is raw diff output -- pre- and post-patch lines are
 * interleaved and "@@" hunk headers are embedded below, so it is not
 * compilable as shown. Comments added here annotate the post-patch logic only.
 */
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
int dpaa2_eth_set_dist_key(struct net_device *net_dev,
enum dpaa2_eth_rx_dist type, u64 flags)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpkg_profile_cfg cls_cfg;
struct dpni_rx_tc_dist_cfg dist_cfg;
u32 rx_hash_fields = 0;
dma_addr_t key_iova;
u8 *dma_mem;
int i;
int err = 0;
if (!dpaa2_eth_hash_enabled(priv)) {
dev_dbg(dev, "Hashing support is not enabled\n");
return -EOPNOTSUPP;
}
memset(&cls_cfg, 0, sizeof(cls_cfg));
for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
if (!(flags & hash_fields[i].rxnfc_field))
continue;
/* For Rx hashing key we set only the selected fields.
* For Rx flow classification key we set all supported fields
*/
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (!(flags & dist_fields[i].rxnfc_field))
continue;
rx_hash_fields |= dist_fields[i].rxnfc_field;
}
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
......@@ -2083,12 +2189,10 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
}
key->type = DPKG_EXTRACT_FROM_HDR;
key->extract.from_hdr.prot = hash_fields[i].cls_prot;
key->extract.from_hdr.prot = dist_fields[i].cls_prot;
key->extract.from_hdr.type = DPKG_FULL_FIELD;
key->extract.from_hdr.field = hash_fields[i].cls_field;
key->extract.from_hdr.field = dist_fields[i].cls_field;
cls_cfg.num_extracts++;
rx_hash_fields |= hash_fields[i].rxnfc_field;
}
dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
......@@ -2098,38 +2202,73 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
if (err) {
dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
goto err_prep_key;
goto free_key;
}
memset(&dist_cfg, 0, sizeof(dist_cfg));
/* Prepare for setting the rx dist */
dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
DPAA2_CLASSIFIER_DMA_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) {
dev_err(dev, "DMA mapping failed\n");
err = -ENOMEM;
goto err_dma_map;
goto free_key;
}
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
/* Post-patch: dispatch to the key-config helper matching the key type
 * and the MC firmware version (legacy vs new API)
 */
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (dpaa2_eth_has_legacy_dist(priv))
err = config_legacy_hash_key(priv, key_iova);
else
err = config_hash_key(priv, key_iova);
} else {
err = config_cls_key(priv, key_iova);
}
err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
dma_unmap_single(dev, dist_cfg.key_cfg_iova,
DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
if (err)
dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
else
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
DMA_TO_DEVICE);
/* Only record the active RXH_ bits once the hash key is accepted */
if (!err && type == DPAA2_ETH_RX_DIST_HASH)
priv->rx_hash_fields = rx_hash_fields;
err_dma_map:
err_prep_key:
free_key:
kfree(dma_mem);
return err;
}
/* Configure the Rx hash key; flags is a combination of RXH_ bits */
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (dpaa2_eth_hash_enabled(priv))
		return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH,
					      flags);

	/* hashing not available on this DPNI */
	return -EOPNOTSUPP;
}
/* Enable and configure Rx flow classification, if the firmware and the
 * DPNI object support it. Returns -EOPNOTSUPP otherwise.
 */
static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	/* Flow steering must be enabled with key masking support, i.e.
	 * NO_FS clear and HAS_KEY_MASKING set
	 */
	if ((priv->dpni_attrs.options &
	     (DPNI_OPT_NO_FS | DPNI_OPT_HAS_KEY_MASKING)) !=
	    DPNI_OPT_HAS_KEY_MASKING) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	priv->rx_cls_enabled = 1;

	return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
......@@ -2159,6 +2298,13 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure hashing\n");
/* Configure the flow classification key; it includes all
* supported header fields and cannot be modified at runtime
*/
err = dpaa2_eth_set_cls(priv);
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure Rx classification key\n");
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
err_cfg.set_frame_annotation = 1;
......
......@@ -290,13 +290,18 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_stats stats;
};
struct dpaa2_eth_hash_fields {
struct dpaa2_eth_dist_fields {
u64 rxnfc_field;
enum net_prot cls_prot;
int cls_field;
int size;
};
/* One slot of the driver-side shadow table of Rx classification rules */
struct dpaa2_eth_cls_rule {
struct ethtool_rx_flow_spec fs; /* rule as received from ethtool */
u8 in_use; /* nonzero while this slot holds an installed rule */
};
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
......@@ -340,6 +345,8 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
......@@ -367,6 +374,24 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
return priv->dpni_ver_major - ver_major;
}
/* Minimum firmware version that supports a more flexible API
* for configuring the Rx flow hash key
*/
#define DPNI_RX_DIST_KEY_VER_MAJOR 7
#define DPNI_RX_DIST_KEY_VER_MINOR 5
/* True when the MC firmware predates the separate hash/FS key commands */
#define dpaa2_eth_has_legacy_dist(priv) \
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
DPNI_RX_DIST_KEY_VER_MINOR) < 0)
/* Number of flow steering (classification) entries the DPNI supports */
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
/* Which kind of Rx distribution key is being configured */
enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_HASH,
DPAA2_ETH_RX_DIST_CLS
};
/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
* the buffer also needs space for its shared info struct, and we need
* to allocate enough to accommodate hardware alignment restrictions
......@@ -410,5 +435,7 @@ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_cls_key_size(void);
int dpaa2_eth_cls_fld_off(int prot, int field);
#endif /* __DPAA2_H */
......@@ -224,10 +224,310 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = cdan;
}
/* Fill key/mask entries for an ethtool Ethernet flow spec.
 * Only fields with a nonzero mask are written.
 */
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
			 void *key, void *mask)
{
	int off;

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
	}

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
	}

	return 0;
}
/* Fill key/mask entries for an ethtool "user IPv4" flow spec.
 * Matching on TOS or IP version is not supported by the key layout.
 */
static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
			 struct ethtool_usrip4_spec *uip_mask,
			 void *key, void *mask)
{
	u32 ports_value, ports_mask;
	int off;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
	}

	if (uip_mask->l4_4_bytes) {
		/* l4_4_bytes packs the source port in its upper 16 bits
		 * and the destination port in its lower 16 bits
		 */
		ports_value = be32_to_cpu(uip_value->l4_4_bytes);
		ports_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(ports_value >> 16);
		*(__be16 *)(mask + off) = htons(ports_mask >> 16);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(ports_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(ports_mask & 0xFFFF);
	}

	return 0;
}
/* Fill key/mask entries for a TCP/UDP/SCTP over IPv4 flow spec;
 * l4_proto pins the IP protocol field to the requested transport.
 */
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
			struct ethtool_tcpip4_spec *l4_mask,
			void *key, void *mask, u8 l4_proto)
{
	int off;

	/* Matching on TOS is not supported */
	if (l4_mask->tos)
		return -EOPNOTSUPP;

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
	}

	return 0;
}
/* Fill key/mask entries for the FLOW_EXT part of an ethtool rule.
 * Matching on a specific VLAN ethertype is not supported.
 */
static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
			 struct ethtool_flow_ext *ext_mask,
			 void *key, void *mask)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (!ext_mask->vlan_tci)
		return 0;

	off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
	*(__be16 *)(key + off) = ext_value->vlan_tci;
	*(__be16 *)(mask + off) = ext_mask->vlan_tci;

	return 0;
}
/* Fill key/mask entries for the FLOW_MAC_EXT part of an ethtool rule
 * (destination MAC only)
 */
static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
			     struct ethtool_flow_ext *ext_mask,
			     void *key, void *mask)
{
	int off;

	if (is_zero_ether_addr(ext_mask->h_dest))
		return 0;

	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
	ether_addr_copy(key + off, ext_value->h_dest);
	ether_addr_copy(mask + off, ext_mask->h_dest);

	return 0;
}
static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
key, mask);
break;
case IP_USER_FLOW:
err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
&fs->m_u.usr_ip4_spec, key, mask);
break;
case TCP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
key, mask, IPPROTO_TCP);
break;
case UDP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
key, mask, IPPROTO_UDP);
break;
case SCTP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
&fs->m_u.sctp_ip4_spec, key, mask,
IPPROTO_SCTP);
break;
default:
return -EOPNOTSUPP;
}
if (err)
return err;
if (fs->flow_type & FLOW_EXT) {
err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
if (err)
return err;
}
return 0;
}
/* Install (add == true) or remove (add == false) one classification rule
 * in hardware. The key and mask are built into a single DMA buffer:
 * the key occupies the first half, the mask the second.
 */
static int do_cls_rule(struct net_device *net_dev,
		       struct ethtool_rx_flow_spec *fs,
		       bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_fs_action_cfg fs_act = { 0 };
	struct dpni_rule_cfg rule_cfg = { 0 };
	dma_addr_t key_iova;
	void *key_buf;
	int err;

	/* The destination must be a valid queue id or the discard action */
	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size();

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
	if (err)
		goto free_mem;

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (!add) {
		err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					   &rule_cfg);
	} else {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
		err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					fs->location, &rule_cfg, &fs_act);
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}
/* Replace, add or delete the rule stored at the given table location.
 * A NULL new_fs means "delete only"; deleting an empty slot returns
 * -EINVAL.
 */
static int update_cls_rule(struct net_device *net_dev,
			   struct ethtool_rx_flow_spec *new_fs,
			   int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *slot;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	slot = &priv->cls_rules[location];

	/* A rule already occupying this slot must be removed first */
	if (slot->in_use) {
		err = do_cls_rule(net_dev, &slot->fs, false);
		if (err)
			return err;
		slot->in_use = 0;
	}

	/* Delete-only request: done (err reflects the removal outcome) */
	if (!new_fs)
		return err;

	err = do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	slot->fs = *new_fs;
	slot->in_use = 1;

	return 0;
}
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int max_rules = dpaa2_eth_fs_count(priv);
int i, j = 0;
switch (rxnfc->cmd) {
case ETHTOOL_GRXFH:
......@@ -240,6 +540,31 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
for (i = 0; i < max_rules; i++)
if (priv->cls_rules[i].in_use)
rxnfc->rule_cnt++;
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
if (rxnfc->fs.location >= max_rules)
return -EINVAL;
if (!priv->cls_rules[rxnfc->fs.location].in_use)
return -EINVAL;
rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < max_rules; i++) {
if (!priv->cls_rules[i].in_use)
continue;
if (j == rxnfc->rule_cnt)
return -EMSGSIZE;
rule_locs[j++] = i;
}
rxnfc->rule_cnt = j;
rxnfc->data = max_rules;
break;
default:
return -EOPNOTSUPP;
}
......@@ -258,6 +583,12 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return -EOPNOTSUPP;
err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
break;
case ETHTOOL_SRXCLSRLINS:
err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
case ETHTOOL_SRXCLSRLDEL:
err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
break;
default:
err = -EOPNOTSUPP;
}
......
......@@ -82,6 +82,9 @@
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
......@@ -515,4 +518,52 @@ struct dpni_rsp_get_api_version {
__le16 minor;
};
#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
/* Payload layout of the DPNI_CMDID_SET_RX_FS_DIST command */
struct dpni_cmd_set_rx_fs_dist {
__le16 dist_size;
u8 enable; /* bit 0 (RX_FS_DIST_ENABLE): enable/disable FS distribution */
u8 tc;
__le16 miss_flow_id; /* Rx queue for frames that miss all FS rules */
__le16 pad;
__le64 key_cfg_iova; /* I/O address of the prepared key configuration */
};
#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
/* Payload layout of the DPNI_CMDID_SET_RX_HASH_DIST command */
struct dpni_cmd_set_rx_hash_dist {
__le16 dist_size;
u8 enable; /* bit 0 (RX_HASH_DIST_ENABLE): enable/disable hash distribution */
u8 tc;
__le32 pad;
__le64 key_cfg_iova; /* I/O address of the prepared key configuration */
};
/* Payload layout of the DPNI_CMDID_ADD_FS_ENT command */
struct dpni_cmd_add_fs_entry {
/* cmd word 0 */
__le16 options; /* DPNI_FS_OPT_* action flags */
u8 tc_id;
u8 key_size;
__le16 index; /* location in the FS table */
__le16 flow_id; /* destination Rx queue on rule hit */
/* cmd word 1 */
__le64 key_iova; /* I/O address of the rule key */
/* cmd word 2 */
__le64 mask_iova; /* I/O address of the rule mask */
/* cmd word 3 */
__le64 flc; /* FLC value for matching traffic */
};
/* Payload layout of the DPNI_CMDID_REMOVE_FS_ENT command */
struct dpni_cmd_remove_fs_entry {
/* cmd word 0 */
__le16 pad0;
u8 tc_id;
u8 key_size;
__le32 pad1;
/* cmd word 1 */
__le64 key_iova; /* I/O address of the rule key */
/* cmd word 2 */
__le64 mask_iova; /* I/O address of the rule mask */
};
#endif /* _FSL_DPNI_CMD_H */
......@@ -1598,3 +1598,155 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
/**
 * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	Distribution configuration
 *
 * If the FS is already enabled with a previous call the classification
 * key will be changed but all the table rules are kept. If the
 * existing rules do not match the key the results will not be
 * predictable. It is the user responsibility to keep key integrity.
 * If cfg.enable is set to 1 the command will create a flow steering table
 * and will classify packets according to this table. The packets that
 * miss all the table rules will be classified according to settings
 * made in dpni_set_rx_hash_dist()
 * If cfg.enable is set to 0 the command will clear flow steering table.
 * The packets will be classified according to settings made in
 * dpni_set_rx_hash_dist()
 */
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
			u32 cmd_flags,
			u16 token,
			const struct dpni_rx_dist_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_set_rx_fs_dist *cmd_params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
					  cmd_flags, token);
	cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
	cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
	cmd_params->tc = cfg->tc;
	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);

	/* send command to MC */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_set_rx_hash_dist() - Set Rx hash distribution
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	Distribution configuration
 * If cfg.enable is set to 1 the packets will be classified using a hash
 * function based on the key received in cfg.key_cfg_iova parameter.
 * If cfg.enable is set to 0 the packets will be sent to the default queue
 */
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
			  u32 cmd_flags,
			  u16 token,
			  const struct dpni_rx_dist_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_set_rx_hash_dist *cmd_params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
					  cmd_flags, token);
	cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
	cmd_params->tc = cfg->tc;
	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
	dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);

	/* send command to MC */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
 *			(to select a flow ID)
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @tc_id:	Traffic class selection (0-7)
 * @index:	Location in the FS table where to insert the entry.
 *		Only relevant if MASKING is enabled for FS
 *		classification on this DPNI, it is ignored for exact match.
 * @cfg:	Flow steering rule to add
 * @action:	Action to be taken as result of a classification hit
 *
 * Return:	'0' on Success; Error code otherwise.
 */
int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
		      u32 cmd_flags,
		      u16 token,
		      u8 tc_id,
		      u16 index,
		      const struct dpni_rule_cfg *cfg,
		      const struct dpni_fs_action_cfg *action)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_add_fs_entry *cmd_params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
					  cmd_flags, token);
	cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
	cmd_params->flc = cpu_to_le64(action->flc);
	cmd_params->flow_id = cpu_to_le16(action->flow_id);
	cmd_params->options = cpu_to_le16(action->options);
	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
	cmd_params->index = cpu_to_le16(index);
	cmd_params->key_size = cfg->key_size;
	cmd_params->tc_id = tc_id;

	/* send command to MC */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
 *			traffic class
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @tc_id:	Traffic class selection (0-7)
 * @cfg:	Flow steering rule to remove
 *
 * Return:	'0' on Success; Error code otherwise.
 */
int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
			 u32 cmd_flags,
			 u16 token,
			 u8 tc_id,
			 const struct dpni_rule_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_remove_fs_entry *cmd_params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
					  cmd_flags, token);
	cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
	cmd_params->key_size = cfg->key_size;
	cmd_params->tc_id = tc_id;

	/* send command to MC */
	return mc_send_command(mc_io, &cmd);
}
......@@ -628,6 +628,45 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
u8 tc_id,
const struct dpni_rx_tc_dist_cfg *cfg);
/**
* When used for fs_miss_flow_id in function dpni_set_rx_dist,
* will signal to dpni to drop all unclassified frames
*/
#define DPNI_FS_MISS_DROP ((uint16_t)-1)
/**
* struct dpni_rx_dist_cfg - Rx distribution configuration
* @dist_size: distribution size
* @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
* the extractions to be used for the distribution key by calling
* dpni_prepare_key_cfg(); relevant only when enable!=0 otherwise
* it can be '0'
* @enable: enable/disable the distribution.
* @tc: TC id for which distribution is set
* @fs_miss_flow_id: when packet misses all rules from flow steering table and
* hash is disabled it will be put into this queue id; use
* DPNI_FS_MISS_DROP to drop frames. The value of this field is
* used only when flow steering distribution is enabled and hash
* distribution is disabled
*/
struct dpni_rx_dist_cfg {
u16 dist_size;
u64 key_cfg_iova;
u8 enable;
u8 tc;
u16 fs_miss_flow_id;
};
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rx_dist_cfg *cfg);
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rx_dist_cfg *cfg);
/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
......@@ -816,6 +855,64 @@ struct dpni_rule_cfg {
u8 key_size;
};
/**
* Discard matching traffic. If set, this takes precedence over any other
* configuration and matching traffic is always discarded.
*/
#define DPNI_FS_OPT_DISCARD 0x1
/**
* Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
* override the FLC value set per queue.
* For more details check the Frame Descriptor section in the hardware
* documentation.
*/
#define DPNI_FS_OPT_SET_FLC 0x2
/**
* Indicates whether the 6 lowest significant bits of FLC are used for stash
* control. If set, the 6 least significant bits in value are interpreted as
* follows:
* - bits 0-1: indicates the number of 64 byte units of context that are
* stashed. FLC value is interpreted as a memory address in this case,
* excluding the 6 LS bits.
* - bits 2-3: indicates the number of 64 byte units of frame annotation
* to be stashed. Annotation is placed at FD[ADDR].
* - bits 4-5: indicates the number of 64 byte units of frame data to be
* stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
* This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
*/
#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
/**
* struct dpni_fs_action_cfg - Action configuration for table look-up
* @flc: FLC value for traffic matching this rule. Please check the
* Frame Descriptor section in the hardware documentation for
* more information.
* @flow_id: Identifies the Rx queue used for matching traffic. Supported
* values are in range 0 to num_queue-1.
* @options: Any combination of DPNI_FS_OPT_ values.
*/
struct dpni_fs_action_cfg {
u64 flc;
u16 flow_id;
u16 options;
};
int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 tc_id,
u16 index,
const struct dpni_rule_cfg *cfg,
const struct dpni_fs_action_cfg *action);
int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 tc_id,
const struct dpni_rule_cfg *cfg);
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment