Commit 5efeac44 authored by Eric W. Biederman, committed by David S. Miller

netpoll: Respect NETIF_F_LLTX

Stop taking the transmit lock when a network device has specified
NETIF_F_LLTX.

If no locks are needed to transmit a packet, this is the ideal scenario for
netpoll, as all packets can be transmitted immediately.

Even if some locks are needed in ndo_start_xmit, skipping the unnecessary
serialization is desirable for netpoll, as it makes it more likely that a
debugging packet can be transmitted immediately instead of being deferred
until later.
Signed-off-by: default avatar"Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 080b3c19
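
For context, NETIF_F_LLTX means the driver does its own transmit locking, so
the core must not take the netdev_queue lock around ndo_start_xmit. A minimal
sketch of what such a driver looks like (the example_* names and the private
spinlock are hypothetical illustrations, not part of this commit):

	/* Hypothetical LLTX driver skeleton (illustration only). */
	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	struct example_priv {
		spinlock_t tx_lock;	/* driver-private transmit lock */
	};

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		/* With NETIF_F_LLTX set, the core (and, after this patch,
		 * netpoll) takes no queue lock for us, so serialize against
		 * concurrent senders here.
		 */
		spin_lock(&priv->tx_lock);
		/* ... hand skb to hardware ... */
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}

	static void example_setup(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		spin_lock_init(&priv->tx_lock);
		dev->features |= NETIF_F_LLTX;	/* core skips the tx lock */
	}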
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2909,6 +2909,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 		}					\
 	}
 
+#define HARD_TX_TRYLOCK(dev, txq)			\
+	(((dev->features & NETIF_F_LLTX) == 0) ?	\
+		__netif_tx_trylock(txq) :		\
+		true )
+
 #define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_unlock(txq);			\
...
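
Note the asymmetry in the new macro: for an ordinary device it attempts
__netif_tx_trylock(), but for an LLTX device it simply evaluates to true
without touching any lock, and the matching HARD_TX_UNLOCK() is likewise a
no-op. A caller therefore always pairs them unconditionally; roughly (caller
sketch, not patch code):

	if (HARD_TX_TRYLOCK(dev, txq)) {	/* always true for LLTX devices */
		/* ... attempt transmission ... */
		HARD_TX_UNLOCK(dev, txq);	/* no-op for LLTX devices */
	}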
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -119,17 +119,17 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			__netif_tx_unlock(txq);
+			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		__netif_tx_unlock(txq);
+		HARD_TX_UNLOCK(dev, txq);
 		local_irq_restore(flags);
 	}
 }
@@ -345,11 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (__netif_tx_trylock(txq)) {
+			if (HARD_TX_TRYLOCK(dev, txq)) {
 				if (!netif_xmit_stopped(txq))
 					status = netpoll_start_xmit(skb, dev, txq);
-				__netif_tx_unlock(txq);
+				HARD_TX_UNLOCK(dev, txq);
 				if (status == NETDEV_TX_OK)
 					break;
...
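
With this change, the retry loop above degenerates nicely for LLTX devices:
the trylock can no longer fail, so the only reasons to keep spinning are a
stopped or frozen queue or the driver returning NETDEV_TX_BUSY. An
illustrative simplification of the resulting control flow for an LLTX device
(not code from this patch):

	int status = NETDEV_TX_BUSY;	/* as initialized earlier in the function */

	for (tries = jiffies_to_usecs(1) / USEC_PER_POLL;
	     tries > 0; --tries) {
		if (!netif_xmit_stopped(txq))
			status = netpoll_start_xmit(skb, dev, txq);	/* no lock taken */
		if (status == NETDEV_TX_OK)
			break;
		udelay(USEC_PER_POLL);	/* busy queue: poll again shortly */
	}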