Commit f5074d0c authored by David S. Miller

Merge branch 'mlx5-100G-fixes'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes #2 for 4.7-rc

The following series provides one-liner fixes for the mlx5 driver, plus one
medium-sized patch that reorganizes ethtool counter reporting (a small
descriptor sketch follows the commit metadata below).

Highlights:
	- Add MODIFY_FLOW_TABLE to the command strings table
	- Add ConnectX-5 PCIe 4.0 to the list of supported devices
	- Rename the ASYNC_EVENTS enum
	- Enable BlueFlame only when supported by the device
	- Avoid adding the same vxlan port twice
	- Report the correct number of PFC counters
	- Reorganize ethtool reported counters and remove duplications
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3ec0a0f1 bfe6d8d1
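
As a rough illustration of the ethtool counter rework in the diffs below: each
counter descriptor now carries a printf-style format string (e.g.
"rx%d_packets"), so per-queue and per-priority names are produced by a single
sprintf() with the index as the argument instead of hand-assembled
"prio%d_%s" strings. A minimal, self-contained user-space sketch of that
pattern follows; the struct and array names here are made up for illustration,
not the driver's.

/* Sketch of the format-string descriptor style (demo names only). */
#include <stdio.h>

#define DEMO_GSTRING_LEN 32

struct demo_counter_desc {
	char format[DEMO_GSTRING_LEN];	/* e.g. "rx%d_packets" */
};

static const struct demo_counter_desc demo_rq_stats_desc[] = {
	{ "rx%d_packets" },
	{ "rx%d_bytes" },
};

int main(void)
{
	char data[4][DEMO_GSTRING_LEN];
	int idx = 0, i, ch;

	/* One string per (channel, counter) pair, like the strings loop below. */
	for (ch = 0; ch < 2; ch++)
		for (i = 0; i < 2; i++)
			sprintf(data[idx++], demo_rq_stats_desc[i].format, ch);

	for (i = 0; i < idx; i++)
		printf("%s\n", data[i]);	/* rx0_packets ... rx1_bytes */
	return 0;
}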
@@ -545,6 +545,7 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
 	default: return "unknown command opcode";
 	}
 }
......
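
For context on the hunk above: MLX5_COMMAND_STR_CASE() is a case/stringification
helper, so adding one line both handles the new opcode and produces its printable
name. A standalone sketch of that pattern, with demo enum names and made-up
opcode values rather than the driver's, assuming the macro follows the usual
case/#__cmd shape:

#include <stdio.h>

enum {
	DEMO_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
	DEMO_CMD_OP_MODIFY_FLOW_TABLE  = 0x93c,
};

/* Expands to: case DEMO_CMD_OP_<cmd>: return "<cmd>" */
#define DEMO_COMMAND_STR_CASE(__cmd) \
	case DEMO_CMD_OP_ ## __cmd: return #__cmd

static const char *demo_command_str(int command)
{
	switch (command) {
	DEMO_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	DEMO_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	default: return "unknown command opcode";
	}
}

int main(void)
{
	printf("%s\n", demo_command_str(DEMO_CMD_OP_MODIFY_FLOW_TABLE));
	return 0;
}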
@@ -401,7 +401,7 @@ enum mlx5e_traffic_types {
 };
 
 enum {
-	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
 	MLX5E_STATE_OPENED,
 	MLX5E_STATE_DESTROYING,
 };
......
@@ -184,7 +184,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 #define MLX5E_NUM_SQ_STATS(priv) \
 	(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
 	 test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+	(hweight8(mlx5e_query_pfc_combined(priv)) * \
+	 NUM_PPORT_PER_PRIO_PFC_COUNTERS)
 
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
@@ -211,42 +213,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 	/* SW counters */
 	for (i = 0; i < NUM_SW_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
 
 	/* Q counters */
 	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
 
 	/* VPORT counters */
 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       vport_stats_desc[i].name);
+		       vport_stats_desc[i].format);
 
 	/* PPORT counters */
 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_802_3_stats_desc[i].name);
+		       pport_802_3_stats_desc[i].format);
 
 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_2863_stats_desc[i].name);
+		       pport_2863_stats_desc[i].format);
 
 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_2819_stats_desc[i].name);
+		       pport_2819_stats_desc[i].format);
 
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-				prio,
-				pport_per_prio_traffic_stats_desc[i].name);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_traffic_stats_desc[i].format, prio);
 	}
 
 	pfc_combined = mlx5e_query_pfc_combined(priv);
 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-				prio, pport_per_prio_pfc_stats_desc[i].name);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_pfc_stats_desc[i].format, prio);
 		}
 	}
@@ -256,16 +257,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 	/* per channel counters */
 	for (i = 0; i < priv->params.num_channels; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
-				rq_stats_desc[j].name);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				rq_stats_desc[j].format, i);
 
 	for (tc = 0; tc < priv->params.num_tc; tc++)
 		for (i = 0; i < priv->params.num_channels; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
-					"tx%d_%s",
-					priv->channeltc_to_txq_map[i][tc],
-					sq_stats_desc[j].name);
+					sq_stats_desc[j].format,
+					priv->channeltc_to_txq_map[i][tc]);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,
......
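
On the MLX5E_NUM_PFC_COUNTERS() change above: hweight8() only counts how many
priorities have PFC enabled, while the strings/values loops emit one entry per
PFC counter for each of those priorities, so the sset count has to multiply by
the per-priority counter count. A standalone sketch of the arithmetic; the
per-priority count used here is an assumed value, not taken from the driver:

#include <stdio.h>

#define DEMO_PER_PRIO_PFC_COUNTERS 5	/* assumption for illustration */

/* Count set bits in an 8-bit PFC-enabled bitmap (what hweight8() does). */
static int demo_hweight8(unsigned char v)
{
	int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned char pfc_combined = 0x0a;	/* PFC on priorities 1 and 3 */
	int prios = demo_hweight8(pfc_combined);

	/* Old formula would report 2; corrected formula reports 2 * 5 = 10. */
	printf("PFC ethtool entries = %d\n", prios * DEMO_PER_PRIO_PFC_COUNTERS);
	return 0;
}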
@@ -105,11 +105,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->rx_packets += rq_stats->packets;
 		s->rx_bytes += rq_stats->bytes;
-		s->lro_packets += rq_stats->lro_packets;
-		s->lro_bytes += rq_stats->lro_bytes;
+		s->rx_lro_packets += rq_stats->lro_packets;
+		s->rx_lro_bytes += rq_stats->lro_bytes;
 		s->rx_csum_none += rq_stats->csum_none;
-		s->rx_csum_sw += rq_stats->csum_sw;
-		s->rx_csum_inner += rq_stats->csum_inner;
+		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_wqe_err += rq_stats->wqe_err;
 		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
 		s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
@@ -122,24 +122,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 			s->tx_packets += sq_stats->packets;
 			s->tx_bytes += sq_stats->bytes;
-			s->tso_packets += sq_stats->tso_packets;
-			s->tso_bytes += sq_stats->tso_bytes;
-			s->tso_inner_packets += sq_stats->tso_inner_packets;
-			s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+			s->tx_tso_packets += sq_stats->tso_packets;
+			s->tx_tso_bytes += sq_stats->tso_bytes;
+			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
 			s->tx_queue_stopped += sq_stats->stopped;
 			s->tx_queue_wake += sq_stats->wake;
 			s->tx_queue_dropped += sq_stats->dropped;
-			s->tx_csum_inner += sq_stats->csum_offload_inner;
-			tx_offload_none += sq_stats->csum_offload_none;
+			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+			tx_offload_none += sq_stats->csum_none;
 		}
 	}
 
 	/* Update calculated offload counters */
-	s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-	s->rx_csum_good = s->rx_packets - s->rx_csum_none -
-			  s->rx_csum_sw;
+	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
 
-	s->link_down_events = MLX5_GET(ppcnt_reg,
+	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
 				priv->stats.pport.phy_counters,
 				counter_set.phys_layer_cntrs.link_down_events);
 }
@@ -244,7 +243,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 {
 	struct mlx5e_priv *priv = vpriv;
 
-	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
 		return;
 
 	switch (event) {
@@ -260,12 +259,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 }
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
@@ -580,7 +579,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
 	int err;
 
-	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+	err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
 	if (err)
 		return err;
......
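
On the mlx5e_create_sq() hunk above: the change passes the device's BlueFlame
capability instead of an unconditional true, with "!!" collapsing the capability
field to a strict 0/1, so the write-combining doorbell mapping is requested only
when the device actually reports BlueFlame (my reading of the third
mlx5_alloc_map_uar() argument; treat it as an assumption). A tiny user-space
illustration of the "!!" idiom, not kernel code:

#include <stdio.h>

/* Normalize an arbitrary capability field to 0 or 1 before passing it on
 * as a boolean "map write-combining?" request. */
static int demo_request_uar(unsigned int bf_cap_field)
{
	int map_wc = !!bf_cap_field;	/* any non-zero value -> 1 */

	printf("bf cap = %u -> map_wc = %d\n", bf_cap_field, map_wc);
	return map_wc;
}

int main(void)
{
	demo_request_uar(0);	/* no BlueFlame: map_wc = 0 */
	demo_request_uar(4);	/* capability set: map_wc = 1 */
	return 0;
}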
@@ -689,7 +689,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	if (is_first_ethertype_ip(skb)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-		rq->stats.csum_sw++;
+		rq->stats.csum_complete++;
 		return;
 	}
 
@@ -699,7 +699,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		if (cqe_is_tunneled(cqe)) {
 			skb->csum_level = 1;
 			skb->encapsulation = 1;
-			rq->stats.csum_inner++;
+			rq->stats.csum_unnecessary_inner++;
 		}
 		return;
 	}
......
@@ -192,12 +192,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		if (skb->encapsulation) {
 			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
 					  MLX5_ETH_WQE_L4_INNER_CSUM;
-			sq->stats.csum_offload_inner++;
+			sq->stats.csum_partial_inner++;
 		} else {
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 		}
 	} else
-		sq->stats.csum_offload_none++;
+		sq->stats.csum_none++;
 
 	if (sq->cc != sq->prev_cc) {
 		sq->prev_cc = sq->cc;
......
@@ -1508,8 +1508,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
 	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
-	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5 */
+	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
 	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
 	{ 0, }
 };
......
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
 	struct mlx5e_vxlan *vxlan;
 	int err;
 
+	if (mlx5e_vxlan_lookup_port(priv, port))
+		goto free_work;
+
 	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
 		goto free_work;
......