Commit 8a4d0b93 authored by Florian Westphal, committed by Jeff Kirsher

net: e1000: support txtd update delay via xmit_more

Don't update the Tx tail descriptor if the queue hasn't been stopped and
we know at least one more skb will be sent right away.
Signed-off-by: Florian Westphal <fw@strlen.de>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 6ddbc4cf
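
For context, the change applies the generic xmit_more tail-write deferral: skip the doorbell (tail register) write when the stack indicates another skb follows immediately, but still flush the tail whenever the queue was just stopped, since no later transmit would perform the deferred write. Below is a minimal sketch of that pattern, not the actual patch: struct my_adapter, struct my_tx_ring, my_xmit_frame and the tdt field are hypothetical placeholders standing in for the e1000 definitions, and skb->xmit_more is the field as it existed at the time of this commit (later kernels replaced it with netdev_xmit_more()).

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

/* Sketch only: made-up driver types standing in for the e1000 ones. */
struct my_tx_ring {
	unsigned int next_to_use;	/* next free descriptor index */
	unsigned int tdt;		/* tail register offset in BAR0 */
};

struct my_adapter {
	u8 __iomem *hw_addr;		/* mapped device registers */
	struct my_tx_ring *tx_ring;
};

static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_adapter *adapter = netdev_priv(netdev);
	struct my_tx_ring *tx_ring = adapter->tx_ring;

	/* ... map the skb and fill Tx descriptors here, advancing
	 * tx_ring->next_to_use, and stop the queue if the ring is
	 * nearly full, as e1000_maybe_stop_tx() does ...
	 */

	/* Defer the doorbell: only write the tail register when the
	 * stack says no further skb follows right away, or when the
	 * queue was stopped (no later transmit would flush the tail).
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0)))
		writel(tx_ring->next_to_use, adapter->hw_addr + tx_ring->tdt);

	return NETDEV_TX_OK;
}

With qdisc bulk dequeue in the stack of that era, xmit_more is set on every skb of a burst except the last, so the MMIO tail write (and, in this driver, the mmiowb() that accompanies it for IA64/Altix) is paid once per burst instead of once per packet.
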
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 			   struct e1000_tx_ring *tx_ring, int tx_flags,
 			   int count)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_tx_buffer *buffer_info;
 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, hw->hw_addr + tx_ring->tdt);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
 }
 
 /* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3264,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 
+		if (!skb->xmit_more ||
+		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
+			/* we need this if more than one processor can write to
+			 * our tail at a time, it synchronizes IO on IA64/Altix
+			 * systems
+			 */
+			mmiowb();
+		}
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;