Commit d8a0f1b0 authored by Paul Gortmaker

gianfar: Add support for byte queue limits.

Add support for byte queue limits (BQL), based on the similar
modifications made to intel/igb/igb_main.c by Eric Dumazet
in commit bdbc0631
	"igb: Add support for byte queue limits."

A local variable for tx_queue->qindex was introduced in
gfar_clean_tx_ring, since it is now used often enough to warrant one,
and it improves readability as well.
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
parent cdf485be
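
For reference, the hunks below follow the standard BQL driver pattern: report
bytes queued at transmit time, report completions from the clean routine, and
reset the counters when the ring is torn down. A minimal sketch of that
pattern follows; the example_* functions are hypothetical stand-ins to
illustrate the API, not the gianfar code itself.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical xmit path: account bytes handed to the HW ring. */
	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq =
			netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* ... set up descriptors for skb ... */

		netdev_tx_sent_queue(txq, skb->len);
		return NETDEV_TX_OK;
	}

	/* Hypothetical clean routine: report what completed in this pass. */
	static void example_clean(struct net_device *dev, int qindex)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qindex);
		unsigned int pkts = 0, bytes = 0;

		/* ... walk finished descriptors, free skbs, tally pkts/bytes ... */

		netdev_tx_completed_queue(txq, pkts, bytes);
	}

	/* On ring teardown the BQL state must be zeroed to match the HW. */
	static void example_reset(struct net_device *dev, int qindex)
	{
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, qindex));
	}
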
@@ -1755,9 +1755,12 @@ static void free_skb_resources(struct gfar_private *priv)
 	/* Go through all the buffer descriptors and free their data buffers */
 	for (i = 0; i < priv->num_tx_queues; i++) {
+		struct netdev_queue *txq;
 		tx_queue = priv->tx_queue[i];
+		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
 		if(tx_queue->tx_skbuff)
 			free_skb_tx_queue(tx_queue);
+		netdev_tx_reset_queue(txq);
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
@@ -2217,6 +2220,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 	}
 
+	netdev_tx_sent_queue(txq, skb->len);
+
 	/*
 	 * We can work in parallel with gfar_clean_tx_ring(), except
 	 * when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2460,6 +2465,7 @@ static void gfar_align_skb(struct sk_buff *skb)
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
 	struct net_device *dev = tx_queue->dev;
+	struct netdev_queue *txq;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp, *next = NULL;
@@ -2471,10 +2477,13 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	int frags = 0, nr_txbds = 0;
 	int i;
 	int howmany = 0;
+	int tqi = tx_queue->qindex;
+	unsigned int bytes_sent = 0;
 	u32 lstatus;
 	size_t buflen;
 
-	rx_queue = priv->rx_queue[tx_queue->qindex];
+	rx_queue = priv->rx_queue[tqi];
+	txq = netdev_get_tx_queue(dev, tqi);
 	bdp = tx_queue->dirty_tx;
 	skb_dirtytx = tx_queue->skb_dirtytx;
@@ -2533,6 +2542,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
 
+		bytes_sent += skb->len;
+
 		/*
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
@@ -2557,13 +2568,15 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
-		netif_wake_subqueue(dev, tx_queue->qindex);
+	if (__netif_subqueue_stopped(dev, tqi) && tx_queue->num_txbdfree)
+		netif_wake_subqueue(dev, tqi);
 
 	/* Update dirty indicators */
 	tx_queue->skb_dirtytx = skb_dirtytx;
 	tx_queue->dirty_tx = bdp;
 
+	netdev_tx_completed_queue(txq, howmany, bytes_sent);
+
 	return howmany;
 }