Commit 17f780b3 authored by David S. Miller

Merge branch 'dpaa2-eth-Add-flow-steering-support-without-masking'

Ioana Ciocoi Radulescu says:

====================
dpaa2-eth: Add flow steering support without masking

On DPAA2 platforms that lack a TCAM (like LS1088A), masking of
flow steering keys is not supported. Until now we didn't offer
flow steering capabilities at all on these platforms.

Introduce limited support for flow steering, where we only
allow ethtool rules that share a common key (i.e. have the same
header fields). If a rule with a different key composition is
wanted, the user must first manually delete all previous rules.
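
An illustrative ethtool session on such a platform (interface
name, queue indices and rule locations are hypothetical):

  # Both rules use the same header fields (ethertype, IP proto,
  # UDP dst port), so the second one is accepted:
  ethtool -N eth0 flow-type udp4 dst-port 4000 action 1 loc 0
  ethtool -N eth0 flow-type udp4 dst-port 8000 action 2 loc 1
  # This one also matches on src-ip, i.e. a different key
  # composition; it is rejected until the rules above are removed,
  # e.g. with "ethtool -N eth0 delete 0":
  ethtool -N eth0 flow-type udp4 src-ip 10.0.0.1 dst-port 53 action 3 loc 2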

The first patch fixes a minor bug, the next two clean up and
prepare the code, and the last one introduces the actual FS
support.
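
As a companion to the last patch, here is a minimal user-space
sketch (illustrative only, not part of the series; field order and
sizes copied from the driver's dist_fields table) of how a rule's
key buffer is compacted down to just the fields it actually uses:

/* Sketch of the key trimming idea: without masking support, the
 * programmed key contains only the fields a rule uses, packed
 * back to back. The bit values mirror the DPAA2_ETH_DIST_* IDs.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct dist_field {
	uint64_t id;	/* unique ID of the header field */
	int size;	/* size of the field in the key, in bytes */
};

static const struct dist_field fields[] = {
	{ 1ULL << 0, 6 },	/* ETH dst */
	{ 1ULL << 1, 6 },	/* ETH src */
	{ 1ULL << 2, 2 },	/* ethertype */
	{ 1ULL << 3, 2 },	/* VLAN TCI */
	{ 1ULL << 4, 4 },	/* IP src */
	{ 1ULL << 5, 4 },	/* IP dst */
	{ 1ULL << 6, 1 },	/* IP proto */
	{ 1ULL << 7, 2 },	/* L4 src port */
	{ 1ULL << 8, 2 },	/* L4 dst port */
};

/* Same idea as dpaa2_eth_cls_trim_rule(): compact the key buffer
 * in place, keeping only the fields selected in the bitmask, and
 * return the resulting key size.
 */
static int trim_key(uint8_t *key, uint64_t keep)
{
	int i, off = 0, new_off = 0;

	for (i = 0; i < (int)(sizeof(fields) / sizeof(fields[0])); i++) {
		if (fields[i].id & keep) {
			memmove(key + new_off, key + off, fields[i].size);
			new_off += fields[i].size;
		}
		off += fields[i].size;
	}

	return new_off;
}

int main(void)
{
	uint8_t key[29] = { 0 };	/* full key: 6+6+2+2+4+4+1+2+2 bytes */
	/* a "udp4 dst-port" rule touches ethertype, IP proto, L4 dst port */
	uint64_t keep = 1ULL << 2 | 1ULL << 6 | 1ULL << 8;

	printf("trimmed key size: %d\n", trim_key(key, keep));	/* prints 5 */
	return 0;
}

The driver does the same compaction in place with memcpy();
memmove() is used here only because it is the safer choice for a
standalone example.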
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9bad65e5 2d680237
@@ -2571,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
 		.rxnfc_field = RXH_L2DA,
 		.cls_prot = NET_PROT_ETH,
 		.cls_field = NH_FLD_ETH_DA,
+		.id = DPAA2_ETH_DIST_ETHDST,
 		.size = 6,
 	}, {
 		.cls_prot = NET_PROT_ETH,
 		.cls_field = NH_FLD_ETH_SA,
+		.id = DPAA2_ETH_DIST_ETHSRC,
 		.size = 6,
 	}, {
 		/* This is the last ethertype field parsed:
@@ -2583,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
 		 */
 		.cls_prot = NET_PROT_ETH,
 		.cls_field = NH_FLD_ETH_TYPE,
+		.id = DPAA2_ETH_DIST_ETHTYPE,
 		.size = 2,
 	}, {
 		/* VLAN header */
 		.rxnfc_field = RXH_VLAN,
 		.cls_prot = NET_PROT_VLAN,
 		.cls_field = NH_FLD_VLAN_TCI,
+		.id = DPAA2_ETH_DIST_VLAN,
 		.size = 2,
 	}, {
 		/* IP header */
 		.rxnfc_field = RXH_IP_SRC,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_SRC,
+		.id = DPAA2_ETH_DIST_IPSRC,
 		.size = 4,
 	}, {
 		.rxnfc_field = RXH_IP_DST,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_DST,
+		.id = DPAA2_ETH_DIST_IPDST,
 		.size = 4,
 	}, {
 		.rxnfc_field = RXH_L3_PROTO,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_PROTO,
+		.id = DPAA2_ETH_DIST_IPPROTO,
 		.size = 1,
 	}, {
 		/* Using UDP ports, this is functionally equivalent to raw
@@ -2613,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
 		.rxnfc_field = RXH_L4_B_0_1,
 		.cls_prot = NET_PROT_UDP,
 		.cls_field = NH_FLD_UDP_PORT_SRC,
+		.id = DPAA2_ETH_DIST_L4SRC,
 		.size = 2,
 	}, {
 		.rxnfc_field = RXH_L4_B_2_3,
 		.cls_prot = NET_PROT_UDP,
 		.cls_field = NH_FLD_UDP_PORT_DST,
+		.id = DPAA2_ETH_DIST_L4DST,
 		.size = 2,
 	},
 };
@@ -2683,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
 }
 
 /* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
 {
 	int i, size = 0;
 
-	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+		if (!(fields & dist_fields[i].id))
+			continue;
 		size += dist_fields[i].size;
+	}
 
 	return size;
 }
@@ -2709,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
 	return 0;
 }
 
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+	int off = 0, new_off = 0;
+	int i, size;
+
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+		size = dist_fields[i].size;
+		if (dist_fields[i].id & fields) {
+			memcpy(key_mem + new_off, key_mem + off, size);
+			new_off += size;
+		}
+		off += size;
+	}
+}
+
 /* Set Rx distribution (hash or flow classification) key
  * flags is a combination of RXH_ bits
  */
@@ -2730,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
 		struct dpkg_extract *key =
 			&cls_cfg.extracts[cls_cfg.num_extracts];
 
-		/* For Rx hashing key we set only the selected fields.
-		 * For Rx flow classification key we set all supported fields
+		/* For both Rx hashing and classification keys
+		 * we set only the selected fields.
 		 */
-		if (type == DPAA2_ETH_RX_DIST_HASH) {
-			if (!(flags & dist_fields[i].rxnfc_field))
-				continue;
+		if (!(flags & dist_fields[i].id))
+			continue;
+		if (type == DPAA2_ETH_RX_DIST_HASH)
 			rx_hash_fields |= dist_fields[i].rxnfc_field;
-		}
 
 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2792,16 +2821,28 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 {
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u64 key = 0;
+	int i;
 
 	if (!dpaa2_eth_hash_enabled(priv))
 		return -EOPNOTSUPP;
 
-	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+		if (dist_fields[i].rxnfc_field & flags)
+			key |= dist_fields[i].id;
+
+	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
 }
 
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
 {
 	struct device *dev = priv->net_dev->dev.parent;
+	int err;
 
 	/* Check if we actually support Rx flow classification */
 	if (dpaa2_eth_has_legacy_dist(priv)) {
@@ -2809,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
 		return -EOPNOTSUPP;
 	}
 
-	if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
-	    !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+	if (!dpaa2_eth_fs_enabled(priv)) {
 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
 		return -EOPNOTSUPP;
 	}
@@ -2820,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
 		return -EOPNOTSUPP;
 	}
 
+	/* If there is no support for masking in the classification table,
+	 * we don't set a default key, as it will depend on the rules
+	 * added by the user at runtime.
+	 */
+	if (!dpaa2_eth_fs_mask_enabled(priv))
+		goto out;
+
+	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+	if (err)
+		return err;
+
+out:
 	priv->rx_cls_enabled = 1;
 
-	return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+	return 0;
 }
 
 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
@@ -2857,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
 	/* Configure the flow classification key; it includes all
 	 * supported header fields and cannot be modified at runtime
 	 */
-	err = dpaa2_eth_set_cls(priv);
+	err = dpaa2_eth_set_default_cls(priv);
 	if (err && err != -EOPNOTSUPP)
 		dev_err(dev, "Failed to configure Rx classification key\n");
...
@@ -342,6 +342,7 @@ struct dpaa2_eth_dist_fields {
 	enum net_prot cls_prot;
 	int cls_field;
 	int size;
+	u64 id;
 };
 
 struct dpaa2_eth_cls_rule {
@@ -394,6 +395,7 @@ struct dpaa2_eth_priv {
 	/* enabled ethtool hashing bits */
 	u64 rx_hash_fields;
+	u64 rx_cls_fields;
 	struct dpaa2_eth_cls_rule *cls_rules;
 	u8 rx_cls_enabled;
 	struct bpf_prog *xdp_prog;
@@ -437,6 +439,12 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
 	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,	\
 				DPNI_RX_DIST_KEY_VER_MINOR) < 0)
 
+#define dpaa2_eth_fs_enabled(priv)	\
+	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv)	\
+	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
 #define dpaa2_eth_fs_count(priv)	\
 	((priv)->dpni_attrs.fs_entries)
@@ -449,6 +457,18 @@ enum dpaa2_eth_rx_dist {
 	DPAA2_ETH_RX_DIST_CLS
 };
 
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST		BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC		BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE		BIT(2)
+#define DPAA2_ETH_DIST_VLAN		BIT(3)
+#define DPAA2_ETH_DIST_IPSRC		BIT(4)
+#define DPAA2_ETH_DIST_IPDST		BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO		BIT(6)
+#define DPAA2_ETH_DIST_L4SRC		BIT(7)
+#define DPAA2_ETH_DIST_L4DST		BIT(8)
+#define DPAA2_ETH_DIST_ALL		(~0U)
+
 static inline
 unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
 				       struct sk_buff *skb)
@@ -483,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
 int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
 
 #endif	/* __DPAA2_H */
...@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, ...@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
} }
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
void *key, void *mask) void *key, void *mask, u64 *fields)
{ {
int off; int off;
...@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, ...@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = eth_value->h_proto; *(__be16 *)(key + off) = eth_value->h_proto;
*(__be16 *)(mask + off) = eth_mask->h_proto; *(__be16 *)(mask + off) = eth_mask->h_proto;
*fields |= DPAA2_ETH_DIST_ETHTYPE;
} }
if (!is_zero_ether_addr(eth_mask->h_source)) { if (!is_zero_ether_addr(eth_mask->h_source)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA); off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
ether_addr_copy(key + off, eth_value->h_source); ether_addr_copy(key + off, eth_value->h_source);
ether_addr_copy(mask + off, eth_mask->h_source); ether_addr_copy(mask + off, eth_mask->h_source);
*fields |= DPAA2_ETH_DIST_ETHSRC;
} }
if (!is_zero_ether_addr(eth_mask->h_dest)) { if (!is_zero_ether_addr(eth_mask->h_dest)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, eth_value->h_dest); ether_addr_copy(key + off, eth_value->h_dest);
ether_addr_copy(mask + off, eth_mask->h_dest); ether_addr_copy(mask + off, eth_mask->h_dest);
*fields |= DPAA2_ETH_DIST_ETHDST;
} }
return 0; return 0;
...@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, ...@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
struct ethtool_usrip4_spec *uip_mask, struct ethtool_usrip4_spec *uip_mask,
void *key, void *mask) void *key, void *mask, u64 *fields)
{ {
int off; int off;
u32 tmp_value, tmp_mask; u32 tmp_value, tmp_mask;
...@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, ...@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = uip_value->ip4src; *(__be32 *)(key + off) = uip_value->ip4src;
*(__be32 *)(mask + off) = uip_mask->ip4src; *(__be32 *)(mask + off) = uip_mask->ip4src;
*fields |= DPAA2_ETH_DIST_IPSRC;
} }
if (uip_mask->ip4dst) { if (uip_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = uip_value->ip4dst; *(__be32 *)(key + off) = uip_value->ip4dst;
*(__be32 *)(mask + off) = uip_mask->ip4dst; *(__be32 *)(mask + off) = uip_mask->ip4dst;
*fields |= DPAA2_ETH_DIST_IPDST;
} }
if (uip_mask->proto) { if (uip_mask->proto) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = uip_value->proto; *(u8 *)(key + off) = uip_value->proto;
*(u8 *)(mask + off) = uip_mask->proto; *(u8 *)(mask + off) = uip_mask->proto;
*fields |= DPAA2_ETH_DIST_IPPROTO;
} }
if (uip_mask->l4_4_bytes) { if (uip_mask->l4_4_bytes) {
...@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, ...@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = htons(tmp_value >> 16); *(__be16 *)(key + off) = htons(tmp_value >> 16);
*(__be16 *)(mask + off) = htons(tmp_mask >> 16); *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
*fields |= DPAA2_ETH_DIST_L4SRC;
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF); *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF); *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
*fields |= DPAA2_ETH_DIST_L4DST;
} }
/* Only apply the rule for IPv4 frames */ /* Only apply the rule for IPv4 frames */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP); *(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF); *(__be16 *)(mask + off) = htons(0xFFFF);
*fields |= DPAA2_ETH_DIST_ETHTYPE;
return 0; return 0;
} }
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
struct ethtool_tcpip4_spec *l4_mask, struct ethtool_tcpip4_spec *l4_mask,
void *key, void *mask, u8 l4_proto) void *key, void *mask, u8 l4_proto, u64 *fields)
{ {
int off; int off;
...@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, ...@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = l4_value->ip4src; *(__be32 *)(key + off) = l4_value->ip4src;
*(__be32 *)(mask + off) = l4_mask->ip4src; *(__be32 *)(mask + off) = l4_mask->ip4src;
*fields |= DPAA2_ETH_DIST_IPSRC;
} }
if (l4_mask->ip4dst) { if (l4_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = l4_value->ip4dst; *(__be32 *)(key + off) = l4_value->ip4dst;
*(__be32 *)(mask + off) = l4_mask->ip4dst; *(__be32 *)(mask + off) = l4_mask->ip4dst;
*fields |= DPAA2_ETH_DIST_IPDST;
} }
if (l4_mask->psrc) { if (l4_mask->psrc) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = l4_value->psrc; *(__be16 *)(key + off) = l4_value->psrc;
*(__be16 *)(mask + off) = l4_mask->psrc; *(__be16 *)(mask + off) = l4_mask->psrc;
*fields |= DPAA2_ETH_DIST_L4SRC;
} }
if (l4_mask->pdst) { if (l4_mask->pdst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = l4_value->pdst; *(__be16 *)(key + off) = l4_value->pdst;
*(__be16 *)(mask + off) = l4_mask->pdst; *(__be16 *)(mask + off) = l4_mask->pdst;
*fields |= DPAA2_ETH_DIST_L4DST;
} }
/* Only apply the rule for IPv4 frames with the specified L4 proto */ /* Only apply the rule for IPv4 frames with the specified L4 proto */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP); *(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF); *(__be16 *)(mask + off) = htons(0xFFFF);
*fields |= DPAA2_ETH_DIST_ETHTYPE;
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = l4_proto; *(u8 *)(key + off) = l4_proto;
*(u8 *)(mask + off) = 0xFF; *(u8 *)(mask + off) = 0xFF;
*fields |= DPAA2_ETH_DIST_IPPROTO;
return 0; return 0;
} }
static int prep_ext_rule(struct ethtool_flow_ext *ext_value, static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask, struct ethtool_flow_ext *ext_mask,
void *key, void *mask) void *key, void *mask, u64 *fields)
{ {
int off; int off;
...@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value, ...@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI); off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
*(__be16 *)(key + off) = ext_value->vlan_tci; *(__be16 *)(key + off) = ext_value->vlan_tci;
*(__be16 *)(mask + off) = ext_mask->vlan_tci; *(__be16 *)(mask + off) = ext_mask->vlan_tci;
*fields |= DPAA2_ETH_DIST_VLAN;
} }
return 0; return 0;
...@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value, ...@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask, struct ethtool_flow_ext *ext_mask,
void *key, void *mask) void *key, void *mask, u64 *fields)
{ {
int off; int off;
...@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, ...@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, ext_value->h_dest); ether_addr_copy(key + off, ext_value->h_dest);
ether_addr_copy(mask + off, ext_mask->h_dest); ether_addr_copy(mask + off, ext_mask->h_dest);
*fields |= DPAA2_ETH_DIST_ETHDST;
} }
return 0; return 0;
} }
static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask) static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
u64 *fields)
{ {
int err; int err;
switch (fs->flow_type & 0xFF) { switch (fs->flow_type & 0xFF) {
case ETHER_FLOW: case ETHER_FLOW:
err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec, err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
key, mask); key, mask, fields);
break; break;
case IP_USER_FLOW: case IP_USER_FLOW:
err = prep_uip_rule(&fs->h_u.usr_ip4_spec, err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
&fs->m_u.usr_ip4_spec, key, mask); &fs->m_u.usr_ip4_spec, key, mask, fields);
break; break;
case TCP_V4_FLOW: case TCP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec, err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
key, mask, IPPROTO_TCP); key, mask, IPPROTO_TCP, fields);
break; break;
case UDP_V4_FLOW: case UDP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec, err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
key, mask, IPPROTO_UDP); key, mask, IPPROTO_UDP, fields);
break; break;
case SCTP_V4_FLOW: case SCTP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
&fs->m_u.sctp_ip4_spec, key, mask, &fs->m_u.sctp_ip4_spec, key, mask,
IPPROTO_SCTP); IPPROTO_SCTP, fields);
break; break;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask) ...@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
return err; return err;
if (fs->flow_type & FLOW_EXT) { if (fs->flow_type & FLOW_EXT) {
err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err) if (err)
return err; return err;
} }
if (fs->flow_type & FLOW_MAC_EXT) { if (fs->flow_type & FLOW_MAC_EXT) {
err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
fields);
if (err) if (err)
return err; return err;
} }
...@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev,
struct dpni_rule_cfg rule_cfg = { 0 }; struct dpni_rule_cfg rule_cfg = { 0 };
struct dpni_fs_action_cfg fs_act = { 0 }; struct dpni_fs_action_cfg fs_act = { 0 };
dma_addr_t key_iova; dma_addr_t key_iova;
u64 fields = 0;
void *key_buf; void *key_buf;
int err; int err;
...@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev,
fs->ring_cookie >= dpaa2_eth_queue_count(priv)) fs->ring_cookie >= dpaa2_eth_queue_count(priv))
return -EINVAL; return -EINVAL;
rule_cfg.key_size = dpaa2_eth_cls_key_size(); rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
/* allocate twice the key size, for the actual key and for mask */ /* allocate twice the key size, for the actual key and for mask */
key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL); key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
...@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM; return -ENOMEM;
/* Fill the key and mask memory areas */ /* Fill the key and mask memory areas */
err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size); err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err) if (err)
goto free_mem; goto free_mem;
if (!dpaa2_eth_fs_mask_enabled(priv)) {
/* Masking allows us to configure a maximal key during init and
* use it for all flow steering rules. Without it, we include
* in the key only the fields actually used, so we need to
* extract the others from the final key buffer.
*
* Program the FS key if needed, or return error if previously
* set key can't be used for the current rule. User needs to
* delete existing rules in this case to allow for the new one.
*/
if (!priv->rx_cls_fields) {
err = dpaa2_eth_set_cls(net_dev, fields);
if (err)
goto free_mem;
priv->rx_cls_fields = fields;
} else if (priv->rx_cls_fields != fields) {
netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
err = -EOPNOTSUPP;
goto free_mem;
}
dpaa2_eth_cls_trim_rule(key_buf, fields);
rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
}
key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2, key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) { if (dma_mapping_error(dev, key_iova)) {
...@@ -500,6 +546,7 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -500,6 +546,7 @@ static int do_cls_rule(struct net_device *net_dev,
} }
rule_cfg.key_iova = key_iova; rule_cfg.key_iova = key_iova;
if (dpaa2_eth_fs_mask_enabled(priv))
rule_cfg.mask_iova = key_iova + rule_cfg.key_size; rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
if (add) { if (add) {
...@@ -522,6 +569,17 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -522,6 +569,17 @@ static int do_cls_rule(struct net_device *net_dev,
return err; return err;
} }
static int num_rules(struct dpaa2_eth_priv *priv)
{
int i, rules = 0;
for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
if (priv->cls_rules[i].in_use)
rules++;
return rules;
}
static int update_cls_rule(struct net_device *net_dev, static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs, struct ethtool_rx_flow_spec *new_fs,
int location) int location)
...@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev, ...@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev,
return err; return err;
rule->in_use = 0; rule->in_use = 0;
if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
priv->rx_cls_fields = 0;
} }
/* If no new entry to add, return here */ /* If no new entry to add, return here */
...@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, ...@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break; break;
case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0; rxnfc->rule_cnt = 0;
for (i = 0; i < max_rules; i++) rxnfc->rule_cnt = num_rules(priv);
if (priv->cls_rules[i].in_use)
rxnfc->rule_cnt++;
rxnfc->data = max_rules; rxnfc->data = max_rules;
break; break;
case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRULE:
......