Commit 4aaaf9b9 authored by Ioana Radulescu, committed by David S. Miller

dpaa2-eth: Configure Rx flow classification key

For firmware versions that support it, configure an Rx flow
classification key at probe time.

Hardware expects all rules in the classification table to share
the same key. So we set up a key containing all supported fields
at driver init time; when a user later adds classification rules
through ethtool, we simply mask out the unused header fields.

Since the key composition process is the same for flow
classification and hashing, reuse existing code where possible.
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f76c483a
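The approach the commit message describes (one key containing every supported field, with per-rule masks deciding which fields actually matter) can be pictured with the short sketch below. This is illustration only, not driver code: the struct layout, field sizes and the build_dst_ip_rule() helper are hypothetical, and the real key is generated through the DPKG extract configuration shown in the diff.

#include <stdint.h>
#include <string.h>

/* Illustration only: every classification rule shares one key layout that
 * contains all supported header fields. A rule "uses" a field by giving it
 * a non-zero mask; fields it does not care about keep an all-zero mask.
 * Field choice, ordering and sizes here are made up for the example.
 */
struct full_key_example {
	uint8_t eth_dst[6];	/* L2 destination MAC  */
	uint8_t ip_proto;	/* L4 protocol         */
	uint8_t ip_src[4];	/* IPv4 source         */
	uint8_t ip_dst[4];	/* IPv4 destination    */
	uint8_t l4_dport[2];	/* L4 destination port */
};

/* Build a rule that matches only on the IPv4 destination address:
 * only the ip_dst bytes of the mask are set, so hardware ignores the
 * other fields even though they are present in the key.
 */
static void build_dst_ip_rule(struct full_key_example *key,
			      struct full_key_example *mask,
			      const uint8_t dst_ip[4])
{
	memset(key, 0, sizeof(*key));
	memset(mask, 0, sizeof(*mask));

	memcpy(key->ip_dst, dst_ip, sizeof(key->ip_dst));
	memset(mask->ip_dst, 0xff, sizeof(mask->ip_dst));
}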
@@ -2089,10 +2089,31 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
return err;
}
/* Set RX hash options
/* Configure the Rx flow classification key */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
int err;
memset(&dist_cfg, 0, sizeof(dist_cfg));
dist_cfg.key_cfg_iova = key;
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
if (err)
dev_err(dev, "dpni_set_rx_fs_dist failed\n");
return err;
}
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
int dpaa2_eth_set_dist_key(struct net_device *net_dev,
enum dpaa2_eth_rx_dist type, u64 flags)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -2103,19 +2124,20 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
int i;
int err = 0;
if (!dpaa2_eth_hash_enabled(priv)) {
dev_dbg(dev, "Hashing support is not enabled\n");
return -EOPNOTSUPP;
}
memset(&cls_cfg, 0, sizeof(cls_cfg));
for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
if (!(flags & dist_fields[i].rxnfc_field))
continue;
/* For Rx hashing key we set only the selected fields.
* For Rx flow classification key we set all supported fields
*/
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (!(flags & dist_fields[i].rxnfc_field))
continue;
rx_hash_fields |= dist_fields[i].rxnfc_field;
}
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2127,8 +2149,6 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
key->extract.from_hdr.type = DPKG_FULL_FIELD;
key->extract.from_hdr.field = dist_fields[i].cls_field;
cls_cfg.num_extracts++;
rx_hash_fields |= dist_fields[i].rxnfc_field;
}
dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
@@ -2150,14 +2170,18 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
goto free_key;
}
if (dpaa2_eth_has_legacy_dist(priv))
err = config_legacy_hash_key(priv, key_iova);
else
err = config_hash_key(priv, key_iova);
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (dpaa2_eth_has_legacy_dist(priv))
err = config_legacy_hash_key(priv, key_iova);
else
err = config_hash_key(priv, key_iova);
} else {
err = config_cls_key(priv, key_iova);
}
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
DMA_TO_DEVICE);
if (!err)
if (!err && type == DPAA2_ETH_RX_DIST_HASH)
priv->rx_hash_fields = rx_hash_fields;
free_key:
@@ -2165,6 +2189,42 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
return err;
}
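For reference, the dist_fields[] table walked by the loop above predates this patch; each entry ties an ethtool RXH_* bit to the DPKG protocol/field used for the extract. The entries below are shown for illustration under a different array name; consult the driver source for the authoritative table and struct definition.

/* Approximate shape of two dist_fields entries: rxnfc_field is the
 * ethtool RXH_* bit, cls_prot/cls_field name the header field for the
 * DPKG extract, size is the field length in bytes.
 */
static const struct dpaa2_eth_dist_fields dist_fields_example[] = {
	{
		/* L4 source port */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.size = 2,
	}, {
		/* L4 destination port */
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.size = 2,
	},
};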
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
if (!dpaa2_eth_hash_enabled(priv))
return -EOPNOTSUPP;
return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
}
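Note that dpaa2_eth_set_hash() keeps its previous signature and semantics, so existing callers (the probe-time default hash setup and the ethtool RXFH path) need no changes. As a rough illustration of the latter, an ETHTOOL_SRXFH request reaches it along these lines; the handler shown here is a sketch, not necessarily the driver's exact ethtool code:

/* Sketch: an ethtool .set_rxnfc handler forwarding the user's RXH_* bits.
 * See dpaa2-ethtool.c for the driver's real handler.
 */
static int example_set_rxnfc(struct net_device *net_dev,
			     struct ethtool_rxnfc *rxnfc)
{
	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		/* rxnfc->data carries the requested RXH_* flags */
		return dpaa2_eth_set_hash(net_dev, rxnfc->data);
	default:
		return -EOPNOTSUPP;
	}
}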
static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
/* Check if we actually support Rx flow classification */
if (dpaa2_eth_has_legacy_dist(priv)) {
dev_dbg(dev, "Rx cls not supported by current MC version\n");
return -EOPNOTSUPP;
}
if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
!(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
dev_dbg(dev, "Rx cls disabled in DPNI options\n");
return -EOPNOTSUPP;
}
if (!dpaa2_eth_hash_enabled(priv)) {
dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
return -EOPNOTSUPP;
}
priv->rx_cls_enabled = 1;
return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
@@ -2194,6 +2254,13 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure hashing\n");
/* Configure the flow classification key; it includes all
* supported header fields and cannot be modified at runtime
*/
err = dpaa2_eth_set_cls(priv);
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure Rx classification key\n");
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
err_cfg.set_frame_annotation = 1;
......
@@ -340,6 +340,7 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
u8 rx_cls_enabled;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -377,6 +378,11 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
DPNI_RX_DIST_KEY_VER_MINOR) < 0)
enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_HASH,
DPAA2_ETH_RX_DIST_CLS
};
/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
* the buffer also needs space for its shared info struct, and we need
* to allocate enough to accommodate hardware alignment restrictions
......
@@ -82,6 +82,7 @@
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
/* Macros for accessing command fields smaller than 1byte */
@@ -517,6 +518,17 @@ struct dpni_rsp_get_api_version {
__le16 minor;
};
#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
struct dpni_cmd_set_rx_fs_dist {
__le16 dist_size;
u8 enable;
u8 tc;
__le16 miss_flow_id;
__le16 pad;
__le64 key_cfg_iova;
};
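The ENABLE_SHIFT/ENABLE_SIZE pair above is consumed by the dpni_set_field()/dpni_get_field() helpers defined earlier in dpni-cmd.h. Roughly (simplified sketch with renamed macros; the real helpers build the mask with GENMASK from the SHIFT/SIZE values):

#define EXAMPLE_MASK(field) \
	GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
		DPNI_##field##_SHIFT)
#define example_set_field(var, field, val) \
	((var) |= (((val) << DPNI_##field##_SHIFT) & EXAMPLE_MASK(field)))

/* So dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable)
 * below sets bit 0 of the 'enable' byte in struct dpni_cmd_set_rx_fs_dist.
 */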
#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
struct dpni_cmd_set_rx_hash_dist {
......
@@ -1599,6 +1599,48 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
/**
* dpni_set_rx_fs_dist() - Set Rx flow steering distribution
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @cfg: Distribution configuration
*
* If flow steering is already enabled, a subsequent call changes the
* classification key but keeps all existing table rules. If those rules
* no longer match the new key, the results are unpredictable; it is the
* caller's responsibility to keep the key and the rules consistent.
* If cfg.enable is set to 1, the command creates a flow steering table
* and classifies packets according to it. Packets that miss all the
* table rules are classified according to the settings made in
* dpni_set_rx_hash_dist().
* If cfg.enable is set to 0, the command clears the flow steering table
* and packets are classified according to the settings made in
* dpni_set_rx_hash_dist().
*/
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rx_dist_cfg *cfg)
{
struct dpni_cmd_set_rx_fs_dist *cmd_params;
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
cmd_params->tc = cfg->tc;
cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
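As the comment above notes, passing cfg.enable = 0 clears the flow steering table again, after which traffic falls back to the hash distribution configured via dpni_set_rx_hash_dist(). A minimal driver-side sketch of such a call (not part of this patch; the helper name is made up) would be:

/* Sketch: disable Rx flow steering so packets are distributed only by
 * the hash key set up through dpni_set_rx_hash_dist().
 */
static int example_clear_rx_cls(struct dpaa2_eth_priv *priv)
{
	struct dpni_rx_dist_cfg dist_cfg;

	memset(&dist_cfg, 0, sizeof(dist_cfg));
	dist_cfg.enable = 0;	/* clear the flow steering table */

	return dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
}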
/**
* dpni_set_rx_hash_dist() - Set Rx hash distribution
* @mc_io: Pointer to MC portal's I/O object
......
@@ -657,6 +657,11 @@ struct dpni_rx_dist_cfg {
u16 fs_miss_flow_id;
};
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rx_dist_cfg *cfg);
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
......