Commit 15bffdff authored by Eugenia Emantayev, committed by David S. Miller

net/mlx4_en: Move queue stopped/waked counters to be per ring

Gives accurate counters and avoids cache misses when several rings
update the stop/wake queue counters.
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 93591aaa
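
The change follows a common pattern for hot-path statistics: each TX ring keeps private queue_stopped/wake_queue counters that only its own transmit/completion path increments, so concurrent rings no longer bounce a shared per-port cache line, and the per-port totals are summed only when statistics are dumped. The sketch below is a minimal userspace illustration of that pattern, not the driver code; the struct and function names (tx_ring, port_stats, collect_port_stats) are made up for the example. In the driver itself the same folding happens in mlx4_en_DUMP_ETH_STATS, as the first hunk below shows.

/*
 * Minimal sketch of the per-ring counter pattern (illustrative names,
 * not the mlx4_en structures): the hot path touches only its own
 * ring's counters; the port-wide totals are aggregated on demand.
 */
#include <stdio.h>

#define NUM_TX_RINGS 8

struct tx_ring {
	unsigned long queue_stopped;	/* updated only by this ring */
	unsigned long wake_queue;
};

struct port_stats {
	unsigned long queue_stopped;	/* filled in when stats are read */
	unsigned long wake_queue;
};

/* Hot path: no shared counter, so no cross-ring cache-line bouncing. */
static void ring_stop_queue(struct tx_ring *ring)
{
	ring->queue_stopped++;
}

static void ring_wake_queue(struct tx_ring *ring)
{
	ring->wake_queue++;
}

/* Cold path: fold every ring's counters into the per-port view. */
static void collect_port_stats(struct port_stats *stats,
			       const struct tx_ring *rings, int n)
{
	int i;

	stats->queue_stopped = 0;
	stats->wake_queue = 0;
	for (i = 0; i < n; i++) {
		stats->queue_stopped += rings[i].queue_stopped;
		stats->wake_queue += rings[i].wake_queue;
	}
}

int main(void)
{
	struct tx_ring rings[NUM_TX_RINGS] = { 0 };
	struct port_stats stats;

	ring_stop_queue(&rings[0]);
	ring_wake_queue(&rings[0]);
	ring_stop_queue(&rings[3]);

	collect_port_stats(&stats, rings, NUM_TX_RINGS);
	printf("queue_stopped=%lu wake_queue=%lu\n",
	       stats.queue_stopped, stats.wake_queue);
	return 0;
}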
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
 	priv->port_stats.tx_chksum_offload = 0;
+	priv->port_stats.queue_stopped = 0;
+	priv->port_stats.wake_queue = 0;
+
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		stats->tx_packets += priv->tx_ring[i]->packets;
 		stats->tx_bytes += priv->tx_ring[i]->bytes;
 		priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+		priv->port_stats.queue_stopped +=
+			priv->tx_ring[i]->queue_stopped;
+		priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
 	}
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
@@ -445,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
 	 */
 	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
 		netif_tx_wake_queue(ring->tx_queue);
-		priv->port_stats.wake_queue++;
+		ring->wake_queue++;
 	}
 	return done;
 }
@@ -691,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue */
 		netif_tx_stop_queue(ring->tx_queue);
-		priv->port_stats.queue_stopped++;
+		ring->queue_stopped++;

 		/* If queue was emptied after the if, and before the
 		 * stop_queue - need to wake the queue, or else it will remain
@@ -704,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (unlikely(((int)(ring->prod - ring->cons)) <=
 			     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 			netif_tx_wake_queue(ring->tx_queue);
-			priv->port_stats.wake_queue++;
+			ring->wake_queue++;
 		} else {
 			return NETDEV_TX_BUSY;
 		}
@@ -274,6 +274,8 @@ struct mlx4_en_tx_ring {
 	unsigned long bytes;
 	unsigned long packets;
 	unsigned long tx_csum;
+	unsigned long queue_stopped;
+	unsigned long wake_queue;
 	struct mlx4_bf bf;
 	bool bf_enabled;
 	struct netdev_queue *tx_queue;