Commit b8128c42 authored by David S. Miller

Merge branch 'mvneta-xmit_more-bql'

Marcin Wojtas says:

====================
mvneta xmit_more and bql support

This is a delayed v2 of a short patchset that introduces xmit_more and BQL
support to the mvneta driver. The only change since v1 is in the xmit_more
support: a condition check that prevents concatenating an excessive number of
descriptors before flushing to the HW.

Any comments or feedback would be welcome.

Changelog:
v1 -> v2:

* Add a condition check that ensures too many descriptors are not
  concatenated before flushing to the HW.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a3308d8f a29b6235
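
Before the diff, a minimal sketch of the xmit_more + BQL pattern this series applies on the transmit path. The netdev helpers (netdev_tx_sent_queue(), netif_xmit_stopped(), netif_tx_stop_queue()) and the skb->xmit_more flag are the kernel APIs actually used below; struct my_txq, my_ring_doorbell() and MY_DOORBELL_MAX are hypothetical stand-ins for the driver-specific parts (mvneta uses struct mvneta_tx_queue, mvneta_txq_pend_desc_add() and MVNETA_TXQ_DEC_SENT_MASK).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_DOORBELL_MAX 0xff    /* hypothetical HW limit per doorbell write */

struct my_txq {                 /* hypothetical per-queue state */
	int count;              /* descriptors currently in use */
	int pending;            /* descriptors queued but not yet flushed to HW */
	int stop_threshold;     /* stop the netdev queue above this */
};

static void my_ring_doorbell(struct my_txq *txq, int descs);   /* hypothetical */

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev,
			   struct my_txq *txq, struct netdev_queue *nq,
			   int frags, int len)
{
	/* BQL: account the bytes handed to the hardware queue */
	netdev_tx_sent_queue(nq, len);

	txq->count += frags;
	if (txq->count >= txq->stop_threshold)
		netif_tx_stop_queue(nq);

	/* xmit_more: defer the doorbell while the stack promises more
	 * packets, but flush if the queue was stopped or the batch would
	 * exceed what one doorbell write can report.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(nq) ||
	    txq->pending + frags > MY_DOORBELL_MAX) {
		my_ring_doorbell(txq, txq->pending + frags);
		txq->pending = 0;
	} else {
		txq->pending += frags;
	}

	return NETDEV_TX_OK;
}

This mirrors the mvneta_tx() change below: descriptors accumulate in txq->pending and are only committed to the hardware doorbell when batching has to stop.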
@@ -224,6 +224,7 @@
 #define MVNETA_TXQ_SENT_THRESH_MASK(coal)  ((coal) << 16)
 #define MVNETA_TXQ_UPDATE_REG(q)           (0x3c60 + ((q) << 2))
 #define MVNETA_TXQ_DEC_SENT_SHIFT          16
+#define MVNETA_TXQ_DEC_SENT_MASK           0xff
 #define MVNETA_TXQ_STATUS_REG(q)           (0x3c40 + ((q) << 2))
 #define MVNETA_TXQ_SENT_DESC_SHIFT         16
 #define MVNETA_TXQ_SENT_DESC_MASK          0x3fff0000
@@ -525,6 +526,7 @@ struct mvneta_tx_queue {
     * descriptor ring
     */
    int count;
+   int pending;
    int tx_stop_threshold;
    int tx_wake_threshold;
@@ -818,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
    /* Only 255 descriptors can be added at once ; Assume caller
     * process TX desriptors in quanta less than 256
     */
-   val = pend_desc;
+   val = pend_desc + txq->pending;
    mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+   txq->pending = 0;
 }
 
 /* Get pointer to next TX descriptor to be processed (send) by HW */
@@ -1756,8 +1759,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
 
 /* Free tx queue skbuffs */
 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
-                                struct mvneta_tx_queue *txq, int num)
+                                struct mvneta_tx_queue *txq, int num,
+                                struct netdev_queue *nq)
 {
+   unsigned int bytes_compl = 0, pkts_compl = 0;
    int i;
 
    for (i = 0; i < num; i++) {
@@ -1765,6 +1770,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
            txq->txq_get_index;
        struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
 
+       if (skb) {
+           bytes_compl += skb->len;
+           pkts_compl++;
+       }
+
        mvneta_txq_inc_get(txq);
 
        if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
@@ -1775,6 +1785,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
            continue;
        dev_kfree_skb_any(skb);
    }
+
+   netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
 }
 
 /* Handle end of transmission */
@@ -1788,7 +1800,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
    if (!tx_done)
        return;
 
-   mvneta_txq_bufs_free(pp, txq, tx_done);
+   mvneta_txq_bufs_free(pp, txq, tx_done, nq);
 
    txq->count -= tx_done;
@@ -2398,12 +2410,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
        struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
 
+       netdev_tx_sent_queue(nq, len);
+
        txq->count += frags;
-       mvneta_txq_pend_desc_add(pp, txq, frags);
-
        if (txq->count >= txq->tx_stop_threshold)
            netif_tx_stop_queue(nq);
 
+       if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+           txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+           mvneta_txq_pend_desc_add(pp, txq, frags);
+       else
+           txq->pending += frags;
+
        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += len;
@@ -2422,9 +2440,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
                                  struct mvneta_tx_queue *txq)
 {
+   struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
    int tx_done = txq->count;
 
-   mvneta_txq_bufs_free(pp, txq, tx_done);
+   mvneta_txq_bufs_free(pp, txq, tx_done, nq);
 
    /* reset txq */
    txq->count = 0;
@@ -2950,6 +2969,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 static void mvneta_txq_deinit(struct mvneta_port *pp,
                              struct mvneta_tx_queue *txq)
 {
+   struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
    kfree(txq->tx_skb);
 
    if (txq->tso_hdrs)
@@ -2961,6 +2982,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
                              txq->size * MVNETA_DESC_ALIGNED_SIZE,
                              txq->descs, txq->descs_phys);
 
+   netdev_tx_reset_queue(nq);
+
    txq->descs = NULL;
    txq->last_desc = 0;
    txq->next_desc_to_proc = 0;
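
For completeness, the completion half of the BQL accounting that the hunks above add to mvneta_txq_bufs_free() and reset in mvneta_txq_deinit() can be summarized with the sketch below. my_txq_clean(), my_txq_teardown() and the flat skb array are hypothetical simplifications of the driver's descriptor walk; netdev_tx_completed_queue(), dev_kfree_skb_any() and netdev_tx_reset_queue() are the real APIs used in the diff.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical cleanup helper: free transmitted skbs and report the
 * completed packets/bytes so BQL can size the in-flight byte limit.
 */
static void my_txq_clean(struct netdev_queue *nq, struct sk_buff **skbs,
			 int num)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	int i;

	for (i = 0; i < num; i++) {
		struct sk_buff *skb = skbs[i];

		if (!skb)
			continue;
		bytes_compl += skb->len;
		pkts_compl++;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}

/* Hypothetical teardown: drop any outstanding BQL state for the queue. */
static void my_txq_teardown(struct netdev_queue *nq)
{
	netdev_tx_reset_queue(nq);
}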