Commit 2df1a70a authored by Tom Herbert's avatar Tom Herbert Committed by David S. Miller

bnx2x: Support for byte queue limits

Changes to bnx2x to use byte queue limits.
Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 298376d3
@@ -102,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
* return idx of last bd freed * return idx of last bd freed
*/ */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
u16 idx) u16 idx, unsigned int *pkts_compl,
unsigned int *bytes_compl)
{ {
struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
struct eth_tx_start_bd *tx_start_bd; struct eth_tx_start_bd *tx_start_bd;
...@@ -159,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, ...@@ -159,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */ /* release skb */
WARN_ON(!skb); WARN_ON(!skb);
if (skb) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
}
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
tx_buf->first_bd = 0; tx_buf->first_bd = 0;
tx_buf->skb = NULL; tx_buf->skb = NULL;
...@@ -170,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) ...@@ -170,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{ {
struct netdev_queue *txq; struct netdev_queue *txq;
u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
unsigned int pkts_compl = 0, bytes_compl = 0;
#ifdef BNX2X_STOP_ON_ERROR #ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic)) if (unlikely(bp->panic))
...@@ -189,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) ...@@ -189,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
" pkt_cons %u\n", " pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons); txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
&pkts_compl, &bytes_compl);
sw_cons++; sw_cons++;
} }
netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
txdata->tx_pkt_cons = sw_cons; txdata->tx_pkt_cons = sw_cons;
txdata->tx_bd_cons = bd_cons; txdata->tx_bd_cons = bd_cons;
...@@ -1077,14 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) ...@@ -1077,14 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[i]; struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) { for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
unsigned pkts_compl = 0, bytes_compl = 0;
u16 sw_prod = txdata->tx_pkt_prod; u16 sw_prod = txdata->tx_pkt_prod;
u16 sw_cons = txdata->tx_pkt_cons; u16 sw_cons = txdata->tx_pkt_cons;
while (sw_cons != sw_prod) { while (sw_cons != sw_prod) {
bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons)); bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
&pkts_compl, &bytes_compl);
sw_cons++; sw_cons++;
} }
netdev_tx_reset_queue(
netdev_get_tx_queue(bp->dev, txdata->txq_index));
} }
} }
} }
...@@ -2788,6 +2802,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2788,6 +2802,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE); skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
unsigned int pkts_compl = 0, bytes_compl = 0;
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
"dropping packet...\n"); "dropping packet...\n");
...@@ -2799,7 +2814,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2799,7 +2814,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/ */
first_bd->nbd = cpu_to_le16(nbd); first_bd->nbd = cpu_to_le16(nbd);
bnx2x_free_tx_pkt(bp, txdata, bnx2x_free_tx_pkt(bp, txdata,
TX_BD(txdata->tx_pkt_prod)); TX_BD(txdata->tx_pkt_prod),
&pkts_compl, &bytes_compl);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -2860,6 +2876,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2860,6 +2876,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e2->parsing_data); pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
netdev_tx_sent_queue(txq, skb->len);
txdata->tx_pkt_prod++; txdata->tx_pkt_prod++;
/* /*
* Make sure that the BD data is updated before updating the producer * Make sure that the BD data is updated before updating the producer
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment