Commit 9bbc8be2 authored by David S. Miller

Merge tag 'mlx5-updates-2020-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-01-22

This series provides updates to the mlx5 driver.
1) Misc small cleanups
2) Some SW steering updates, including header copy support
3) Full ethtool statistics support for the E-Switch uplink representor.
Some refactoring was required to share the bare-metal NIC ethtool
stats with the uplink representor. On top of this, Vlad converts the
ethtool stats support in the E-Switch vport representors to use the mlx5e
"stats groups" infrastructure and then applies all applicable stats
groups to the uplink representor netdev.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 790e0114 7c453526
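
Below is a minimal, standalone C sketch of the "stats groups" pattern referred to in item 3 of the commit message. It is illustration only: the demo_* names are invented here and are not part of the driver; the real pieces are struct mlx5e_stats_grp and the stats_grps/stats_grps_num fields added to struct mlx5e_profile in the diff below. The idea is that each profile publishes an array of group pointers plus a count callback, and generic helpers walk that array, which is what lets the uplink representor reuse the bare-metal NIC groups wholesale.

#include <stdio.h>

struct demo_priv;

/* Mirrors struct mlx5e_stats_grp: counter count, string/value fillers, updater. */
struct demo_stats_grp {
	int  (*get_num_stats)(struct demo_priv *priv);
	int  (*fill_strings)(struct demo_priv *priv, const char **data, int idx);
	int  (*fill_stats)(struct demo_priv *priv, unsigned long long *data, int idx);
	void (*update_stats)(struct demo_priv *priv);
};

typedef const struct demo_stats_grp *const demo_stats_grp_t;

/* Mirrors the two fields this series adds to struct mlx5e_profile. */
struct demo_profile {
	demo_stats_grp_t *stats_grps;
	unsigned int (*stats_grps_num)(struct demo_priv *priv);
};

struct demo_priv {
	const struct demo_profile *profile;
	unsigned long long rx_packets;	/* one toy software counter */
};

/* One concrete group, analogous to the sw/sw_rep groups in the diff. */
static int  sw_num(struct demo_priv *priv) { (void)priv; return 1; }
static int  sw_strs(struct demo_priv *priv, const char **data, int idx)
{ (void)priv; data[idx++] = "rx_packets"; return idx; }
static int  sw_vals(struct demo_priv *priv, unsigned long long *data, int idx)
{ data[idx++] = priv->rx_packets; return idx; }
static void sw_update(struct demo_priv *priv) { priv->rx_packets += 42; }

static const struct demo_stats_grp sw_grp = {
	.get_num_stats = sw_num,
	.fill_strings  = sw_strs,
	.fill_stats    = sw_vals,
	.update_stats  = sw_update,
};

/* A profile simply lists the groups it wants exposed via ethtool. */
static demo_stats_grp_t demo_grps[] = { &sw_grp };
static unsigned int demo_grps_num(struct demo_priv *priv)
{ (void)priv; return sizeof(demo_grps) / sizeof(demo_grps[0]); }

static const struct demo_profile demo_profile = {
	.stats_grps	= demo_grps,
	.stats_grps_num	= demo_grps_num,
};

/* Generic walker with the same shape as the new mlx5e_stats_* helpers. */
static void demo_ethtool_dump(struct demo_priv *priv)
{
	const unsigned int ngrps = priv->profile->stats_grps_num(priv);
	demo_stats_grp_t *grps = priv->profile->stats_grps;
	unsigned long long vals[16];
	const char *names[16];
	unsigned int i, g, total = 0;
	int idx;

	for (g = ngrps; g-- > 0; )	/* updates run in reverse group order */
		grps[g]->update_stats(priv);

	for (g = 0; g < ngrps; g++)
		total += grps[g]->get_num_stats(priv);

	for (idx = 0, g = 0; g < ngrps; g++)
		idx = grps[g]->fill_strings(priv, names, idx);
	for (idx = 0, g = 0; g < ngrps; g++)
		idx = grps[g]->fill_stats(priv, vals, idx);

	for (i = 0; i < total; i++)
		printf("%s: %llu\n", names[i], vals[i]);
}

int main(void)
{
	struct demo_priv priv = { .profile = &demo_profile };

	demo_ethtool_dump(&priv);	/* prints "rx_packets: 42" */
	return 0;
}

The real helpers with this shape are mlx5e_stats_total_num(), mlx5e_stats_update(), mlx5e_stats_fill() and mlx5e_stats_fill_strings() in en_stats.c, shown further down in this diff.
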
......@@ -892,6 +892,8 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
......@@ -964,7 +966,6 @@ struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
......
......@@ -218,13 +218,9 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
int i, num_stats = 0;
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < mlx5e_num_stats_grps; i++)
num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
return num_stats;
return mlx5e_stats_total_num(priv);
case ETH_SS_PRIV_FLAGS:
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
......@@ -242,14 +238,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return mlx5e_ethtool_get_sset_count(priv, sset);
}
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
{
int i, idx = 0;
for (i = 0; i < mlx5e_num_stats_grps; i++)
idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
}
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
{
int i;
......@@ -268,7 +256,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
mlx5e_fill_stats_strings(priv, data);
mlx5e_stats_fill_strings(priv, data);
break;
}
}
......@@ -283,14 +271,13 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
int i, idx = 0;
int idx = 0;
mutex_lock(&priv->state_lock);
mlx5e_update_stats(priv);
mlx5e_stats_update(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
mlx5e_stats_fill(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
......
......@@ -159,23 +159,14 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
int i;
for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
if (mlx5e_stats_grps[i].update_stats)
mlx5e_stats_grps[i].update_stats(priv);
}
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
if (mlx5e_stats_grps[i].update_stats_mask &
for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
if (mlx5e_nic_stats_grps[i]->update_stats_mask &
MLX5E_NDO_UPDATE_STATS)
mlx5e_stats_grps[i].update_stats(priv);
mlx5e_nic_stats_grps[i]->update_stats(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
......@@ -4878,6 +4869,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
......@@ -5195,6 +5188,8 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
};
/* mlx5e generic netdev management API (move to en_common.c) */
......
......@@ -117,24 +117,71 @@ static const struct counter_desc vport_rep_stats_desc[] = {
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
static void mlx5e_rep_get_strings(struct net_device *dev,
u32 stringset, uint8_t *data)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
int i, j;
return NUM_VPORT_REP_SW_COUNTERS;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
strcpy(data + (i * ETH_GSTRING_LEN),
strcpy(data + (idx++) * ETH_GSTRING_LEN,
sw_rep_stats_desc[i].format);
for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
strcpy(data + (i * ETH_GSTRING_LEN),
vport_rep_stats_desc[j].format);
break;
}
return idx;
}
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
struct rtnl_link_stats64 stats64 = {};
memset(s, 0, sizeof(*s));
mlx5e_fold_sw_stats64(priv, &stats64);
s->rx_packets = stats64.rx_packets;
s->rx_bytes = stats64.rx_bytes;
s->tx_packets = stats64.tx_packets;
s->tx_bytes = stats64.tx_bytes;
s->tx_queue_dropped = stats64.tx_dropped;
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
return NUM_VPORT_REP_HW_COUNTERS;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
vport_rep_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
......@@ -157,64 +204,33 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = vf_stats.rx_bytes;
}
static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct rtnl_link_stats64 *vport_stats;
mlx5e_grp_802_3_update_stats(priv);
vport_stats = &priv->stats.vf_vport;
vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
static void mlx5e_rep_get_strings(struct net_device *dev,
u32 stringset, uint8_t *data)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
struct rtnl_link_stats64 stats64 = {};
memset(s, 0, sizeof(*s));
mlx5e_fold_sw_stats64(priv, &stats64);
struct mlx5e_priv *priv = netdev_priv(dev);
s->rx_packets = stats64.rx_packets;
s->rx_bytes = stats64.rx_bytes;
s->tx_packets = stats64.tx_packets;
s->tx_bytes = stats64.tx_bytes;
s->tx_queue_dropped = stats64.tx_dropped;
switch (stringset) {
case ETH_SS_STATS:
mlx5e_stats_fill_strings(priv, data);
break;
}
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int i, j;
if (!data)
return;
mutex_lock(&priv->state_lock);
mlx5e_rep_update_sw_counters(priv);
priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i);
for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
vport_rep_stats_desc, j);
mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
return mlx5e_stats_total_num(priv);
default:
return -EOPNOTSUPP;
}
......@@ -1674,19 +1690,32 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_close_drop_rq(&priv->drop_rq);
}
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv;
int err;
int err = mlx5e_init_rep_rx(priv);
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
if (err)
return err;
}
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_create_q_counters(priv);
return 0;
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
mlx5e_destroy_q_counters(priv);
mlx5e_cleanup_rep_rx(priv);
}
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
struct mlx5_rep_uplink_priv *uplink_priv;
struct net_device *netdev;
struct mlx5e_priv *priv;
int err;
netdev = rpriv->netdev;
priv = netdev_priv(netdev);
uplink_priv = &rpriv->uplink_priv;
mutex_init(&uplink_priv->unready_flows_lock);
......@@ -1695,7 +1724,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
/* init shared tc flow table */
err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
if (err)
goto destroy_tises;
return err;
mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
......@@ -1707,24 +1736,40 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
goto tc_esw_cleanup;
}
}
return 0;
tc_esw_cleanup:
mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
mlx5e_destroy_tises(priv);
return err;
}
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
int err;
mlx5e_destroy_tises(priv);
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
return err;
}
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
goto destroy_tises;
}
return 0;
destroy_tises:
mlx5e_destroy_tises(priv);
return err;
}
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
/* clean indirect TC block notifications */
unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
mlx5e_rep_indr_clean_block_privs(rpriv);
......@@ -1732,7 +1777,16 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
/* delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
}
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_destroy_tises(priv);
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
......@@ -1812,6 +1866,43 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5_lag_remove(mdev);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
&MLX5E_STATS_GRP(sw_rep),
&MLX5E_STATS_GRP(vport_rep),
};
static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
return ARRAY_SIZE(mlx5e_rep_stats_grps);
}
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(sw),
&MLX5E_STATS_GRP(qcnt),
&MLX5E_STATS_GRP(vnic_env),
&MLX5E_STATS_GRP(vport),
&MLX5E_STATS_GRP(802_3),
&MLX5E_STATS_GRP(2863),
&MLX5E_STATS_GRP(2819),
&MLX5E_STATS_GRP(phy),
&MLX5E_STATS_GRP(eth_ext),
&MLX5E_STATS_GRP(pcie),
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
......@@ -1821,29 +1912,33 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_rep_update_hw_counters,
.update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_rep_rx,
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_rx = mlx5e_init_ul_rep_rx,
.cleanup_rx = mlx5e_cleanup_ul_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_uplink_rep_update_hw_counters,
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
static bool
......
......@@ -35,6 +35,58 @@
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
return !priv->profile->stats_grps_num ? 0 :
priv->profile->stats_grps_num(priv);
}
unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
unsigned int total = 0;
int i;
for (i = 0; i < num_stats_grps; i++)
total += stats_grps[i]->get_num_stats(priv);
return total;
}
void mlx5e_stats_update(struct mlx5e_priv *priv)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
int i;
for (i = num_stats_grps - 1; i >= 0; i--)
if (stats_grps[i]->update_stats)
stats_grps[i]->update_stats(priv);
}
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
int i;
for (i = 0; i < num_stats_grps; i++)
idx = stats_grps[i]->fill_stats(priv, data, idx);
}
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
int i, idx = 0;
for (i = 0; i < num_stats_grps; i++)
idx = stats_grps[i]->fill_strings(priv, data, idx);
}
/* Concrete NIC Stats */
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
......@@ -146,12 +198,12 @@ static const struct counter_desc sw_stats_desc[] = {
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
return NUM_SW_COUNTERS;
}
static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
int i;
......@@ -160,7 +212,7 @@ static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
int i;
......@@ -169,7 +221,7 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
......@@ -315,7 +367,7 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
......@@ -328,7 +380,7 @@ static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
......@@ -343,7 +395,7 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
......@@ -356,7 +408,7 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
......@@ -391,14 +443,13 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
int i;
......@@ -412,8 +463,7 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
int i;
......@@ -427,7 +477,7 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
......@@ -490,13 +540,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
return NUM_VPORT_COUNTERS;
}
static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
int i;
......@@ -505,8 +554,7 @@ static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
int i;
......@@ -516,7 +564,7 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
......@@ -555,13 +603,12 @@ static const struct counter_desc pport_802_3_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
return NUM_PPORT_802_3_COUNTERS;
}
static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
int i;
......@@ -570,8 +617,7 @@ static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
int i;
......@@ -584,7 +630,7 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -612,13 +658,12 @@ static const struct counter_desc pport_2863_stats_desc[] = {
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
return NUM_PPORT_2863_COUNTERS;
}
static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
int i;
......@@ -627,8 +672,7 @@ static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
int i;
......@@ -638,7 +682,7 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -673,13 +717,12 @@ static const struct counter_desc pport_2819_stats_desc[] = {
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
return NUM_PPORT_2819_COUNTERS;
}
static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
int i;
......@@ -688,8 +731,7 @@ static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
int i;
......@@ -699,7 +741,7 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -737,7 +779,7 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
......@@ -754,8 +796,7 @@ static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
......@@ -777,7 +818,7 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
......@@ -803,7 +844,7 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -833,7 +874,7 @@ static const struct counter_desc pport_eth_ext_stats_desc[] = {
#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
return NUM_PPORT_ETH_EXT_COUNTERS;
......@@ -841,8 +882,7 @@ static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
return 0;
}
static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
int i;
......@@ -853,8 +893,7 @@ static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
int i;
......@@ -866,7 +905,7 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -907,7 +946,7 @@ static const struct counter_desc pcie_perf_stall_stats_desc[] = {
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
int num_stats = 0;
......@@ -923,8 +962,7 @@ static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
int i;
......@@ -945,8 +983,7 @@ static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
int i;
......@@ -970,7 +1007,7 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -1018,8 +1055,7 @@ static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
u8 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i, prio;
......@@ -1039,8 +1075,7 @@ static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *pri
return idx;
}
static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
u64 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
struct mlx5e_pport_stats *pport = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -1115,13 +1150,13 @@ static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
}
}
static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}
static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
mlx5e_grp_per_tc_prio_update_stats(priv);
mlx5e_grp_per_tc_congest_prio_update_stats(priv);
......@@ -1296,29 +1331,27 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
return mlx5e_grp_per_prio_traffic_get_num_stats() +
mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
return idx;
}
static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
return idx;
}
static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
......@@ -1353,13 +1386,12 @@ static const struct counter_desc mlx5e_pme_error_desc[] = {
#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
int i;
......@@ -1372,8 +1404,7 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
struct mlx5_pme_stats pme_stats;
int i;
......@@ -1391,45 +1422,46 @@ static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
{
return mlx5e_ipsec_get_count(priv);
}
static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
{
return idx + mlx5e_ipsec_get_strings(priv,
data + idx * ETH_GSTRING_LEN);
}
static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
{
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
{
mlx5e_ipsec_update_stats(priv);
}
static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
}
static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
return idx + mlx5e_tls_get_stats(priv, data + idx);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
......@@ -1563,7 +1595,7 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
int max_nch = priv->max_nch;
......@@ -1576,8 +1608,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
(NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
......@@ -1619,8 +1650,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
......@@ -1668,104 +1698,46 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
/* The stats groups order is opposite to the update_stats() order calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
{
.get_num_stats = mlx5e_grp_sw_get_num_stats,
.fill_strings = mlx5e_grp_sw_fill_strings,
.fill_stats = mlx5e_grp_sw_fill_stats,
.update_stats = mlx5e_grp_sw_update_stats,
},
{
.get_num_stats = mlx5e_grp_q_get_num_stats,
.fill_strings = mlx5e_grp_q_fill_strings,
.fill_stats = mlx5e_grp_q_fill_stats,
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
.update_stats = mlx5e_grp_q_update_stats,
},
{
.get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
.fill_strings = mlx5e_grp_vnic_env_fill_strings,
.fill_stats = mlx5e_grp_vnic_env_fill_stats,
.update_stats = mlx5e_grp_vnic_env_update_stats,
},
{
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
.update_stats = mlx5e_grp_vport_update_stats,
},
{
.get_num_stats = mlx5e_grp_802_3_get_num_stats,
.fill_strings = mlx5e_grp_802_3_fill_strings,
.fill_stats = mlx5e_grp_802_3_fill_stats,
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
.update_stats = mlx5e_grp_802_3_update_stats,
},
{
.get_num_stats = mlx5e_grp_2863_get_num_stats,
.fill_strings = mlx5e_grp_2863_fill_strings,
.fill_stats = mlx5e_grp_2863_fill_stats,
.update_stats = mlx5e_grp_2863_update_stats,
},
{
.get_num_stats = mlx5e_grp_2819_get_num_stats,
.fill_strings = mlx5e_grp_2819_fill_strings,
.fill_stats = mlx5e_grp_2819_fill_stats,
.update_stats = mlx5e_grp_2819_update_stats,
},
{
.get_num_stats = mlx5e_grp_phy_get_num_stats,
.fill_strings = mlx5e_grp_phy_fill_strings,
.fill_stats = mlx5e_grp_phy_fill_stats,
.update_stats = mlx5e_grp_phy_update_stats,
},
{
.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
.fill_strings = mlx5e_grp_eth_ext_fill_strings,
.fill_stats = mlx5e_grp_eth_ext_fill_stats,
.update_stats = mlx5e_grp_eth_ext_update_stats,
},
{
.get_num_stats = mlx5e_grp_pcie_get_num_stats,
.fill_strings = mlx5e_grp_pcie_fill_strings,
.fill_stats = mlx5e_grp_pcie_fill_stats,
.update_stats = mlx5e_grp_pcie_update_stats,
},
{
.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
.fill_strings = mlx5e_grp_per_prio_fill_strings,
.fill_stats = mlx5e_grp_per_prio_fill_stats,
.update_stats = mlx5e_grp_per_prio_update_stats,
},
{
.get_num_stats = mlx5e_grp_pme_get_num_stats,
.fill_strings = mlx5e_grp_pme_fill_strings,
.fill_stats = mlx5e_grp_pme_fill_stats,
},
{
.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
.fill_strings = mlx5e_grp_ipsec_fill_strings,
.fill_stats = mlx5e_grp_ipsec_fill_stats,
.update_stats = mlx5e_grp_ipsec_update_stats,
},
{
.get_num_stats = mlx5e_grp_tls_get_num_stats,
.fill_strings = mlx5e_grp_tls_fill_strings,
.fill_stats = mlx5e_grp_tls_fill_stats,
},
{
.get_num_stats = mlx5e_grp_channels_get_num_stats,
.fill_strings = mlx5e_grp_channels_fill_strings,
.fill_stats = mlx5e_grp_channels_fill_stats,
},
{
.get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
.fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
.fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
.update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
},
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(sw),
&MLX5E_STATS_GRP(qcnt),
&MLX5E_STATS_GRP(vnic_env),
&MLX5E_STATS_GRP(vport),
&MLX5E_STATS_GRP(802_3),
&MLX5E_STATS_GRP(2863),
&MLX5E_STATS_GRP(2819),
&MLX5E_STATS_GRP(phy),
&MLX5E_STATS_GRP(eth_ext),
&MLX5E_STATS_GRP(pcie),
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(ipsec),
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
};
const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
return ARRAY_SIZE(mlx5e_nic_stats_grps);
}
......@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__
......@@ -55,6 +56,56 @@ struct counter_desc {
size_t offset; /* Byte offset */
};
enum {
MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};
struct mlx5e_priv;
struct mlx5e_stats_grp {
u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
void (*update_stats)(struct mlx5e_priv *priv);
};
typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)
#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
#define MLX5E_DECLARE_STATS_GRP(grp) \
const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
MLX5E_DECLARE_STATS_GRP(grp) = { \
.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
.fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
.fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
.update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
.update_stats_mask = mask, \
}
unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
/* Concrete NIC Stats */
struct mlx5e_sw_stats {
u64 rx_packets;
u64 rx_bytes;
......@@ -322,22 +373,22 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
enum {
MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};
struct mlx5e_priv;
struct mlx5e_stats_grp {
u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
void (*update_stats)(struct mlx5e_priv *priv);
};
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
extern MLX5E_DECLARE_STATS_GRP(sw);
extern MLX5E_DECLARE_STATS_GRP(qcnt);
extern MLX5E_DECLARE_STATS_GRP(vnic_env);
extern MLX5E_DECLARE_STATS_GRP(vport);
extern MLX5E_DECLARE_STATS_GRP(802_3);
extern MLX5E_DECLARE_STATS_GRP(2863);
extern MLX5E_DECLARE_STATS_GRP(2819);
extern MLX5E_DECLARE_STATS_GRP(phy);
extern MLX5E_DECLARE_STATS_GRP(eth_ext);
extern MLX5E_DECLARE_STATS_GRP(pcie);
extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
#endif /* __MLX5_EN_STATS_H__ */
......@@ -1810,6 +1810,40 @@ static void *get_match_headers_value(u32 flags,
outer_headers);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct net_device *ingress_dev;
struct flow_match_meta match;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
return 0;
flow_rule_match_meta(rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EINVAL;
}
ingress_dev = __dev_get_by_index(dev_net(filter_dev),
match.key->ingress_ifindex);
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
return -EINVAL;
}
if (ingress_dev != filter_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't match on the ingress filter port");
return -EINVAL;
}
return 0;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
......@@ -1830,6 +1864,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
match_level = outer_match_level;
......@@ -1873,6 +1908,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
spec);
}
err = mlx5e_flower_parse_meta(filter_dev, f);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
......
......@@ -32,7 +32,7 @@
* pools.
*/
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
4 * 1024, };
......
......@@ -419,6 +419,28 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
}
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
&MLX5E_STATS_GRP(sw),
&MLX5E_STATS_GRP(qcnt),
&MLX5E_STATS_GRP(vnic_env),
&MLX5E_STATS_GRP(vport),
&MLX5E_STATS_GRP(802_3),
&MLX5E_STATS_GRP(2863),
&MLX5E_STATS_GRP(2819),
&MLX5E_STATS_GRP(phy),
&MLX5E_STATS_GRP(pcie),
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
};
static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
return ARRAY_SIZE(mlx5i_stats_grps);
}
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
......@@ -435,6 +457,8 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
};
/* mlx5i netdev NDos */
......
......@@ -677,9 +677,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
goto out_invalid_arg;
}
if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
mlx5_core_warn_once(dmn->mdev,
"Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
"Destination table level should be higher than source table\n");
goto out_invalid_arg;
"Connecting table at level %d to a destination table at level %d\n",
matcher->tbl->level,
action->dest_tbl.tbl->level);
}
attr.final_icm_addr = rx_rule ?
action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
......@@ -1314,58 +1317,85 @@ dr_action_modify_get_hw_info(u16 sw_field)
}
static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
u8 offset, length, max_length, action;
u8 max_length;
u16 sw_field;
u8 hw_opcode;
u32 data;
/* Get SW modify action data */
action = MLX5_GET(set_action_in, sw_action, action_type);
length = MLX5_GET(set_action_in, sw_action, length);
offset = MLX5_GET(set_action_in, sw_action, offset);
sw_field = MLX5_GET(set_action_in, sw_action, field);
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify action invalid field given\n");
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
}
max_length = hw_action_info->end - hw_action_info->start + 1;
switch (action) {
case MLX5_ACTION_TYPE_SET:
hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
MLX5_SET(dr_action_hw_set, hw_action,
opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
hw_action_info->start);
/* PRM defines that length zero specific length of 32bits */
if (!length)
length = 32;
MLX5_SET(dr_action_hw_set, hw_action, destination_length,
max_length == 32 ? 0 : max_length);
if (length + offset > max_length) {
mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
*ret_hw_info = hw_action_info;
return 0;
}
static int
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
u8 offset, length, max_length;
u16 sw_field;
u32 data;
/* Get SW modify action data */
length = MLX5_GET(set_action_in, sw_action, length);
offset = MLX5_GET(set_action_in, sw_action, offset);
sw_field = MLX5_GET(set_action_in, sw_action, field);
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
return -EINVAL;
}
break;
case MLX5_ACTION_TYPE_ADD:
hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
offset = 0;
length = max_length;
break;
/* PRM defines that length zero specific length of 32bits */
length = length ? length : 32;
default:
mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
return -EOPNOTSUPP;
max_length = hw_action_info->end - hw_action_info->start + 1;
if (length + offset > max_length) {
mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
return -EINVAL;
}
MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
MLX5_SET(dr_action_hw_set, hw_action,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
......@@ -1384,48 +1414,236 @@ dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
}
static int
dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
const __be64 *sw_action)
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_dst_hw_info,
const struct dr_action_modify_field_conv **ret_src_hw_info)
{
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
const struct dr_action_modify_field_conv *hw_dst_action_info;
const struct dr_action_modify_field_conv *hw_src_action_info;
u16 src_field, dst_field;
/* Get SW modify action data */
src_field = MLX5_GET(copy_action_in, sw_action, src_field);
dst_field = MLX5_GET(copy_action_in, sw_action, dst_field);
src_offset = MLX5_GET(copy_action_in, sw_action, src_offset);
dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset);
length = MLX5_GET(copy_action_in, sw_action, length);
/* Convert SW data to HW modify action format */
hw_src_action_info = dr_action_modify_get_hw_info(src_field);
hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
if (!hw_src_action_info || !hw_dst_action_info) {
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
return -EINVAL;
}
/* PRM defines that length zero specific length of 32bits */
length = length ? length : 32;
src_max_length = hw_src_action_info->end -
hw_src_action_info->start + 1;
dst_max_length = hw_dst_action_info->end -
hw_dst_action_info->start + 1;
if (length + src_offset > src_max_length ||
length + dst_offset > dst_max_length) {
mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
return -EINVAL;
}
MLX5_SET(dr_action_hw_copy, hw_action,
opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
hw_dst_action_info->hw_field);
MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
hw_dst_action_info->start + dst_offset);
MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
length == 32 ? 0 : length);
MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
hw_src_action_info->hw_field);
MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
hw_src_action_info->start + dst_offset);
*ret_dst_hw_info = hw_dst_action_info;
*ret_src_hw_info = hw_src_action_info;
return 0;
}
static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_dst_hw_info,
const struct dr_action_modify_field_conv **ret_src_hw_info)
{
u16 sw_field;
u8 action;
int ret;
sw_field = MLX5_GET(set_action_in, sw_action, field);
*hw_action = 0;
*ret_src_hw_info = NULL;
/* Get SW modify action type */
action = MLX5_GET(set_action_in, sw_action, action_type);
/* Check if SW field is supported in current domain (RX/TX) */
if (action == MLX5_ACTION_TYPE_SET) {
switch (action) {
case MLX5_ACTION_TYPE_SET:
ret = dr_action_modify_sw_to_hw_set(dmn, sw_action,
hw_action,
ret_dst_hw_info);
break;
case MLX5_ACTION_TYPE_ADD:
ret = dr_action_modify_sw_to_hw_add(dmn, sw_action,
hw_action,
ret_dst_hw_info);
break;
case MLX5_ACTION_TYPE_COPY:
ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action,
hw_action,
ret_dst_hw_info,
ret_src_hw_info);
break;
default:
mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
ret = -EOPNOTSUPP;
}
return ret;
}
static int
dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
struct mlx5dr_domain *dmn = action->rewrite.dmn;
if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
action->rewrite.allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
sw_field);
return -EINVAL;
}
}
if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
} else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
action->rewrite.allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
sw_field);
return -EINVAL;
}
}
} else if (action == MLX5_ACTION_TYPE_ADD) {
if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
return -EINVAL;
}
return 0;
}
static int
dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
struct mlx5dr_domain *dmn = action->rewrite.dmn;
if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
mlx5dr_dbg(dmn, "Unsupported field %d for add action\n",
sw_field);
return -EINVAL;
}
} else {
mlx5dr_info(dmn, "Unsupported action %d modify action\n", action);
return -EOPNOTSUPP;
return 0;
}
static int
dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
struct mlx5dr_domain *dmn = action->rewrite.dmn;
u16 sw_fields[2];
int i;
sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field);
sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field);
for (i = 0; i < 2; i++) {
if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
action->rewrite.allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
sw_fields[i]);
return -EINVAL;
}
} else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
action->rewrite.allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
sw_fields[i]);
return -EINVAL;
}
}
}
if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
return -EINVAL;
}
return 0;
}
static int
dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
struct mlx5dr_domain *dmn = action->rewrite.dmn;
u8 action_type;
int ret;
action_type = MLX5_GET(set_action_in, sw_action, action_type);
switch (action_type) {
case MLX5_ACTION_TYPE_SET:
ret = dr_action_modify_check_set_field_limitation(action,
sw_action);
break;
case MLX5_ACTION_TYPE_ADD:
ret = dr_action_modify_check_add_field_limitation(action,
sw_action);
break;
case MLX5_ACTION_TYPE_COPY:
ret = dr_action_modify_check_copy_field_limitation(action,
sw_action);
break;
default:
mlx5dr_info(dmn, "Unsupported action %d modify action\n",
action_type);
ret = -EOPNOTSUPP;
}
return ret;
}
static bool
dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
{
......@@ -1434,7 +1652,7 @@ dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
__be64 sw_actions[],
......@@ -1442,20 +1660,26 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
u32 *num_hw_actions,
bool *modify_ttl)
{
const struct dr_action_modify_field_conv *hw_action_info;
const struct dr_action_modify_field_conv *hw_dst_action_info;
const struct dr_action_modify_field_conv *hw_src_action_info;
u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
*modify_ttl = false;
action->rewrite.allow_rx = 1;
action->rewrite.allow_tx = 1;
for (i = 0; i < num_sw_actions; i++) {
sw_action = &sw_actions[i];
ret = dr_action_modify_check_field_limitation(dmn, sw_action);
ret = dr_action_modify_check_field_limitation(action,
sw_action);
if (ret)
return ret;
......@@ -1466,32 +1690,35 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
ret = dr_action_modify_sw_to_hw(dmn,
sw_action,
&hw_action,
&hw_action_info);
&hw_dst_action_info,
&hw_src_action_info);
if (ret)
return ret;
/* Due to a HW limitation we cannot modify 2 different L3 types */
if (l3_type && hw_action_info->l3_type &&
hw_action_info->l3_type != l3_type) {
if (l3_type && hw_dst_action_info->l3_type &&
hw_dst_action_info->l3_type != l3_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
return -EINVAL;
}
if (hw_action_info->l3_type)
l3_type = hw_action_info->l3_type;
if (hw_dst_action_info->l3_type)
l3_type = hw_dst_action_info->l3_type;
/* Due to a HW limitation we cannot modify two different L4 types */
if (l4_type && hw_action_info->l4_type &&
hw_action_info->l4_type != l4_type) {
if (l4_type && hw_dst_action_info->l4_type &&
hw_dst_action_info->l4_type != l4_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
return -EINVAL;
}
if (hw_action_info->l4_type)
l4_type = hw_action_info->l4_type;
if (hw_dst_action_info->l4_type)
l4_type = hw_dst_action_info->l4_type;
/* HW reads and executes two actions at once this means we
* need to create a gap if two actions access the same field
*/
if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field ||
(hw_src_action_info &&
hw_field == hw_src_action_info->hw_field))) {
/* Check if after gap insertion the total number of HW
* modify actions doesn't exceeds the limit
*/
......@@ -1501,7 +1728,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
return -EINVAL;
}
}
hw_field = hw_action_info->hw_field;
hw_field = hw_dst_action_info->hw_field;
hw_actions[hw_idx] = hw_action;
hw_idx++;
......@@ -1544,7 +1771,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
goto free_chunk;
}
ret = dr_actions_convert_modify_header(dmn,
ret = dr_actions_convert_modify_header(action,
max_hw_actions,
num_sw_actions,
actions,
......
......@@ -32,6 +32,7 @@ enum {
};
enum {
MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
};
......@@ -625,4 +626,19 @@ struct mlx5_ifc_dr_action_hw_set_bits {
u8 inline_data[0x20];
};
struct mlx5_ifc_dr_action_hw_copy_bits {
u8 opcode[0x8];
u8 destination_field_code[0x8];
u8 reserved_at_10[0x2];
u8 destination_left_shifter[0x6];
u8 reserved_at_18[0x2];
u8 destination_length[0x6];
u8 reserved_at_20[0x8];
u8 source_field_code[0x8];
u8 reserved_at_30[0x2];
u8 source_left_shifter[0x6];
u8 reserved_at_38[0x8];
};
#endif /* MLX5_IFC_DR_H */
......@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
len = nstrides << wq->fbc.log_stride;
wqe = mlx5_wq_cyc_get_wqe(wq, ix);
pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
}
......