Commit 5b263f53 authored by Yevgeny Petrilin, committed by David S. Miller

mlx4_en: Byte Queue Limit support

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e22979d9
...@@ -667,6 +667,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -667,6 +667,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_deactivate_cq(priv, cq); mlx4_en_deactivate_cq(priv, cq);
goto tx_err; goto tx_err;
} }
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
/* Arm CQ for TX completions */ /* Arm CQ for TX completions */
mlx4_en_arm_cq(priv, cq); mlx4_en_arm_cq(priv, cq);
...@@ -812,12 +813,15 @@ static void mlx4_en_restart(struct work_struct *work) ...@@ -812,12 +813,15 @@ static void mlx4_en_restart(struct work_struct *work)
watchdog_task); watchdog_task);
struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev; struct net_device *dev = priv->dev;
int i;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mutex_lock(&mdev->state_lock); mutex_lock(&mdev->state_lock);
if (priv->port_up) { if (priv->port_up) {
mlx4_en_stop_port(dev); mlx4_en_stop_port(dev);
for (i = 0; i < priv->tx_ring_num; i++)
netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
if (mlx4_en_start_port(dev)) if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port); en_err(priv, "Failed restarting port %d\n", priv->port);
} }
......
...@@ -315,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ...@@ -315,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
int size = cq->size; int size = cq->size;
u32 size_mask = ring->size_mask; u32 size_mask = ring->size_mask;
struct mlx4_cqe *buf = cq->buf; struct mlx4_cqe *buf = cq->buf;
u32 packets = 0;
u32 bytes = 0;
if (!priv->port_up) if (!priv->port_up)
return; return;
...@@ -343,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ...@@ -343,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
priv, ring, ring_index, priv, ring, ring_index,
!!((ring->cons + txbbs_skipped) & !!((ring->cons + txbbs_skipped) &
ring->size)); ring->size));
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index); } while (ring_index != new_index);
++cons_index; ++cons_index;
...@@ -359,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ...@@ -359,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
mlx4_cq_set_ci(mcq); mlx4_cq_set_ci(mcq);
wmb(); wmb();
ring->cons += txbbs_skipped; ring->cons += txbbs_skipped;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
/* Wakeup Tx queue if this ring stopped it */ /* Wakeup Tx queue if this ring stopped it */
if (unlikely(ring->blocked)) { if (unlikely(ring->blocked)) {
if ((u32) (ring->prod - ring->cons) <= if ((u32) (ring->prod - ring->cons) <=
ring->size - HEADROOM - MAX_DESC_TXBBS) { ring->size - HEADROOM - MAX_DESC_TXBBS) {
ring->blocked = 0; ring->blocked = 0;
netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); netif_tx_wake_queue(ring->tx_queue);
priv->port_stats.wake_queue++; priv->port_stats.wake_queue++;
} }
} }
...@@ -583,7 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -583,7 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(((int)(ring->prod - ring->cons)) > if (unlikely(((int)(ring->prod - ring->cons)) >
ring->size - HEADROOM - MAX_DESC_TXBBS)) { ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */ /* every full Tx ring stops queue */
netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); netif_tx_stop_queue(ring->tx_queue);
ring->blocked = 1; ring->blocked = 1;
priv->port_stats.queue_stopped++; priv->port_stats.queue_stopped++;
...@@ -649,7 +654,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -649,7 +654,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
priv->port_stats.tso_packets++; priv->port_stats.tso_packets++;
i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
ring->bytes += skb->len + (i - 1) * lso_header_size; tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
ring->packets += i; ring->packets += i;
} else { } else {
/* Normal (Non LSO) packet */ /* Normal (Non LSO) packet */
...@@ -657,10 +662,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -657,10 +662,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ? ((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
data = &tx_desc->data; data = &tx_desc->data;
ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
ring->packets++; ring->packets++;
} }
ring->bytes += tx_info->nr_bytes;
netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
......
...@@ -200,6 +200,7 @@ enum cq_type { ...@@ -200,6 +200,7 @@ enum cq_type {
struct mlx4_en_tx_info { struct mlx4_en_tx_info {
struct sk_buff *skb; struct sk_buff *skb;
u32 nr_txbb; u32 nr_txbb;
u32 nr_bytes;
u8 linear; u8 linear;
u8 data_offset; u8 data_offset;
u8 inl; u8 inl;
...@@ -257,6 +258,7 @@ struct mlx4_en_tx_ring { ...@@ -257,6 +258,7 @@ struct mlx4_en_tx_ring {
unsigned long tx_csum; unsigned long tx_csum;
struct mlx4_bf bf; struct mlx4_bf bf;
bool bf_enabled; bool bf_enabled;
struct netdev_queue *tx_queue;
}; };
struct mlx4_en_rx_desc { struct mlx4_en_rx_desc {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment