Commit 9bbc8be2 authored by David S. Miller

Merge tag 'mlx5-updates-2020-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-01-22

This series provides updates to the mlx5 driver.
1) Misc small cleanups
2) Some SW steering updates including header copy support
3) Full ethtool statistics support for the E-Switch uplink representor.
Some refactoring was required to share the bare-metal NIC ethtool
stats with the uplink representor. On top of this, Vlad converts the
ethtool stats support in E-Switch vport representors to use the mlx5e
"stats groups" infrastructure and then applies all applicable stats
to the uplink representor netdev.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 790e0114 7c453526
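
For context on the "stats groups" infrastructure mentioned above: each mlx5e profile now supplies an ordered array of group descriptors plus a count callback, instead of relying on the old global mlx5e_stats_grps[] table (see the en.h, en_stats.h and ipoib.c hunks below). The following minimal sketch is not part of the series; it only illustrates how a group would be defined and published with the new macros. The group name "example" and its single counter are hypothetical.

#include "en.h"	/* struct mlx5e_priv, the en_stats.h macros, ETH_GSTRING_LEN */

/* Hypothetical group exporting one counter. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(example)
{
	return 1;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(example)
{
	strcpy(data + (idx++) * ETH_GSTRING_LEN, "example_counter");
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(example)
{
	data[idx++] = 0;	/* hypothetical counter value */
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(example)
{
	/* refresh HW/SW counters here; mask 0 below keeps it off the ndo path */
}

static MLX5E_DEFINE_STATS_GRP(example, 0);

/* A profile publishes its groups as an ordered array plus a count callback,
 * as mlx5e_nic_profile and mlx5i_nic_profile do in this series.
 */
static mlx5e_stats_grp_t example_stats_grps[] = {
	&MLX5E_STATS_GRP(example),
};

static unsigned int example_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(example_stats_grps);
}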
@@ -892,6 +892,8 @@ struct mlx5e_profile {
 	int (*update_rx)(struct mlx5e_priv *priv);
 	void (*update_stats)(struct mlx5e_priv *priv);
 	void (*update_carrier)(struct mlx5e_priv *priv);
+	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
+	mlx5e_stats_grp_t *stats_grps;
 	struct {
 		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
 		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -964,7 +966,6 @@ struct sk_buff *
 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-void mlx5e_update_stats(struct mlx5e_priv *priv);
 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
...
@@ -218,13 +218,9 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 {
-	int i, num_stats = 0;
-
 	switch (sset) {
 	case ETH_SS_STATS:
-		for (i = 0; i < mlx5e_num_stats_grps; i++)
-			num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
-		return num_stats;
+		return mlx5e_stats_total_num(priv);
 	case ETH_SS_PRIV_FLAGS:
 		return MLX5E_NUM_PFLAGS;
 	case ETH_SS_TEST:
@@ -242,14 +238,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 	return mlx5e_ethtool_get_sset_count(priv, sset);
 }
 
-static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
-{
-	int i, idx = 0;
-
-	for (i = 0; i < mlx5e_num_stats_grps; i++)
-		idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
-}
-
 void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
 {
 	int i;
@@ -268,7 +256,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
 		break;
 
 	case ETH_SS_STATS:
-		mlx5e_fill_stats_strings(priv, data);
+		mlx5e_stats_fill_strings(priv, data);
 		break;
 	}
 }
@@ -283,14 +271,13 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
 				     struct ethtool_stats *stats, u64 *data)
 {
-	int i, idx = 0;
+	int idx = 0;
 
 	mutex_lock(&priv->state_lock);
-	mlx5e_update_stats(priv);
+	mlx5e_stats_update(priv);
 	mutex_unlock(&priv->state_lock);
 
-	for (i = 0; i < mlx5e_num_stats_grps; i++)
-		idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
+	mlx5e_stats_fill(priv, data, idx);
 }
 
 static void mlx5e_get_ethtool_stats(struct net_device *dev,
...
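
Note: the mlx5e_stats_total_num(), mlx5e_stats_fill_strings(), mlx5e_stats_fill() and mlx5e_stats_update() helpers used above are implemented in en_stats.c, which is not part of this excerpt. Conceptually they walk priv->profile->stats_grps instead of the old global table; a rough sketch under that assumption (not the verbatim implementation):

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = priv->profile->stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	/* Ask every group of this profile how many counters it exports. */
	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = priv->profile->stats_grps_num(priv);
	int i;

	/* Each group appends its values and returns the next free index. */
	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}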
@@ -159,23 +159,14 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
 	mutex_unlock(&priv->state_lock);
 }
 
-void mlx5e_update_stats(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
-		if (mlx5e_stats_grps[i].update_stats)
-			mlx5e_stats_grps[i].update_stats(priv);
-}
-
 void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
 {
 	int i;
 
-	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
-		if (mlx5e_stats_grps[i].update_stats_mask &
+	for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
+		if (mlx5e_nic_stats_grps[i]->update_stats_mask &
 		    MLX5E_NDO_UPDATE_STATS)
-			mlx5e_stats_grps[i].update_stats(priv);
+			mlx5e_nic_stats_grps[i]->update_stats(priv);
 }
 
 static void mlx5e_update_stats_work(struct work_struct *work)
@@ -4878,6 +4869,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
 					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
 		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
 	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
@@ -5195,6 +5188,8 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
 	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
 	.max_tc = MLX5E_MAX_NUM_TC,
 	.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
+	.stats_grps = mlx5e_nic_stats_grps,
+	.stats_grps_num = mlx5e_nic_stats_grps_num,
 };
 
 /* mlx5e generic netdev management API (move to en_common.c) */
...
@@ -29,6 +29,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
 #ifndef __MLX5_EN_STATS_H__
 #define __MLX5_EN_STATS_H__
@@ -55,6 +56,56 @@ struct counter_desc {
 	size_t offset; /* Byte offset */
 };
 
+enum {
+	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+	u16 update_stats_mask;
+	int (*get_num_stats)(struct mlx5e_priv *priv);
+	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+	void (*update_stats)(struct mlx5e_priv *priv);
+};
+
+typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
+
+#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
+
+#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
+	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
+	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
+	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
+	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+
+#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
+
+#define MLX5E_DECLARE_STATS_GRP(grp) \
+	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
+
+#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
+MLX5E_DECLARE_STATS_GRP(grp) = { \
+	.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
+	.fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
+	.fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
+	.update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
+	.update_stats_mask = mask, \
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
+void mlx5e_stats_update(struct mlx5e_priv *priv);
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+
+/* Concrete NIC Stats */
+
 struct mlx5e_sw_stats {
 	u64 rx_packets;
 	u64 rx_bytes;
@@ -322,22 +373,22 @@ struct mlx5e_stats {
 	struct mlx5e_pcie_stats pcie;
 };
 
-enum {
-	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
-};
-
-struct mlx5e_priv;
-struct mlx5e_stats_grp {
-	u16 update_stats_mask;
-	int (*get_num_stats)(struct mlx5e_priv *priv);
-	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
-	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
-	void (*update_stats)(struct mlx5e_priv *priv);
-};
-
-extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
-extern const int mlx5e_num_stats_grps;
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
+extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
+
+extern MLX5E_DECLARE_STATS_GRP(sw);
+extern MLX5E_DECLARE_STATS_GRP(qcnt);
+extern MLX5E_DECLARE_STATS_GRP(vnic_env);
+extern MLX5E_DECLARE_STATS_GRP(vport);
+extern MLX5E_DECLARE_STATS_GRP(802_3);
+extern MLX5E_DECLARE_STATS_GRP(2863);
+extern MLX5E_DECLARE_STATS_GRP(2819);
+extern MLX5E_DECLARE_STATS_GRP(phy);
+extern MLX5E_DECLARE_STATS_GRP(eth_ext);
+extern MLX5E_DECLARE_STATS_GRP(pcie);
+extern MLX5E_DECLARE_STATS_GRP(per_prio);
+extern MLX5E_DECLARE_STATS_GRP(pme);
+extern MLX5E_DECLARE_STATS_GRP(channels);
+extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+
 #endif /* __MLX5_EN_STATS_H__ */
@@ -1810,6 +1810,40 @@ static void *get_match_headers_value(u32 flags,
 			     outer_headers);
 }
 
+static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
+				   struct flow_cls_offload *f)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct netlink_ext_ack *extack = f->common.extack;
+	struct net_device *ingress_dev;
+	struct flow_match_meta match;
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+		return 0;
+
+	flow_rule_match_meta(rule, &match);
+	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+		return -EINVAL;
+	}
+
+	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
+					 match.key->ingress_ifindex);
+	if (!ingress_dev) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't find the ingress port to match on");
+		return -EINVAL;
+	}
+
+	if (ingress_dev != filter_dev) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't match on the ingress filter port");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
 			      struct flow_cls_offload *f,
@@ -1830,6 +1864,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
 	u8 *match_level;
+	int err;
 
 	match_level = outer_match_level;
@@ -1873,6 +1908,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				    spec);
 	}
 
+	err = mlx5e_flower_parse_meta(filter_dev, f);
+	if (err)
+		return err;
+
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 		struct flow_match_basic match;
...
@@ -32,7 +32,7 @@
  * pools.
  */
 #define ESW_SIZE (16 * 1024 * 1024)
-const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
+static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
 					  1 * 1024 * 1024,
 					  64 * 1024,
 					  4 * 1024, };
...
@@ -419,6 +419,28 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_q_counters(priv);
 }
 
+/* The stats groups order is opposite to the update_stats() order calls */
+static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
+	&MLX5E_STATS_GRP(sw),
+	&MLX5E_STATS_GRP(qcnt),
+	&MLX5E_STATS_GRP(vnic_env),
+	&MLX5E_STATS_GRP(vport),
+	&MLX5E_STATS_GRP(802_3),
+	&MLX5E_STATS_GRP(2863),
+	&MLX5E_STATS_GRP(2819),
+	&MLX5E_STATS_GRP(phy),
+	&MLX5E_STATS_GRP(pcie),
+	&MLX5E_STATS_GRP(per_prio),
+	&MLX5E_STATS_GRP(pme),
+	&MLX5E_STATS_GRP(channels),
+	&MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
+{
+	return ARRAY_SIZE(mlx5i_stats_grps);
+}
+
 static const struct mlx5e_profile mlx5i_nic_profile = {
 	.init = mlx5i_init,
 	.cleanup = mlx5i_cleanup,
@@ -435,6 +457,8 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
 	.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
 	.max_tc = MLX5I_MAX_NUM_TC,
 	.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+	.stats_grps = mlx5i_stats_grps,
+	.stats_grps_num = mlx5i_stats_grps_num,
 };
 
 /* mlx5i netdev NDos */
...
@@ -32,6 +32,7 @@ enum {
 };
 
 enum {
+	MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
 	MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
 	MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
 };
@@ -625,4 +626,19 @@ struct mlx5_ifc_dr_action_hw_set_bits {
 	u8         inline_data[0x20];
 };
 
+struct mlx5_ifc_dr_action_hw_copy_bits {
+	u8         opcode[0x8];
+	u8         destination_field_code[0x8];
+	u8         reserved_at_10[0x2];
+	u8         destination_left_shifter[0x6];
+	u8         reserved_at_18[0x2];
+	u8         destination_length[0x6];
+
+	u8         reserved_at_20[0x8];
+	u8         source_field_code[0x8];
+	u8         reserved_at_30[0x2];
+	u8         source_left_shifter[0x6];
+	u8         reserved_at_38[0x8];
+};
+
 #endif /* MLX5_IFC_DR_H */
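
The mlx5_ifc_dr_action_hw_copy_bits layout added above describes the 8-byte hardware "copy" modify-header action (opcode MLX5DR_ACTION_MDFY_HW_OP_COPY) behind the SW steering header-copy support in this series. As an illustration only, such an action could be encoded with the generic MLX5_SET() accessor; the helper below is hypothetical and not part of the patch:

/* Needs <linux/mlx5/device.h> for MLX5_SET() and the driver-internal
 * mlx5_ifc_dr.h shown above for the dr_action_hw_copy layout.
 * Encodes one header-copy modify action into an 8-byte HW action buffer.
 */
static void example_dr_encode_copy(u8 *hw_action,
				   u8 dst_field, u8 dst_shift, u8 length,
				   u8 src_field, u8 src_shift)
{
	MLX5_SET(dr_action_hw_copy, hw_action, opcode,
		 MLX5DR_ACTION_MDFY_HW_OP_COPY);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_field);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shift);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_length, length);
	MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_field);
	MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shift);
}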
@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
 	len = nstrides << wq->fbc.log_stride;
 	wqe = mlx5_wq_cyc_get_wqe(wq, ix);
-	pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+	pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
 		mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
 }
...