Commit c578e9ab authored by David S. Miller

Merge branch 'netdev_tx_locked-removal'

Florian Westphal says:

====================
net: core: remove TX_LOCKED support

Not that many users left, let's kill it.

TX_LOCKED was meant to be used by LLTX drivers when spin_trylock()
failed.  The stack then requeued the skb if the collision happened on a
different cpu, or freed it to prevent deadlocks when the same cpu was
already holding the lock.
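
For reference, the pattern being removed looked roughly like this.  This
is a minimal sketch with hypothetical names (foo_priv, foo_xmit), not
code taken from any one driver:

	struct foo_priv {
		spinlock_t tx_lock;	/* driver-private lock, dev advertises NETIF_F_LLTX */
	};

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* LLTX: the core calls us without holding netif_tx_lock */
		if (!spin_trylock(&priv->tx_lock))
			return NETDEV_TX_LOCKED;	/* core requeued or freed the skb */

		/* ... hand the skb to the hardware ... */

		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}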

Most of the driver removal patches fall into one of three categories
(a before/after sketch follows the list):
1. remove the driver-private tx lock (and the LLTX flag), or
2. convert spin_trylock to plain spin_lock, or
3. convert TX_LOCKED to free+TX_OK
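
As a before/after sketch of categories 2 and 3 combined, the hypothetical
driver above becomes roughly the following; foo_tx_ring_has_room() is a
stand-in for the driver's real ring-space check:

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		spin_lock(&priv->tx_lock);		/* category 2: plain spin_lock */
		if (!foo_tx_ring_has_room(priv)) {
			spin_unlock(&priv->tx_lock);
			dev_kfree_skb_any(skb);		/* category 3: free the skb ... */
			return NETDEV_TX_OK;		/* ... and report TX_OK */
		}
		/* ... hand the skb to the hardware ... */
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}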

Patches are grouped by these categories, last patch is the actual removal.
All driver changes were compile-tested only, with the exception of atl1e.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c971c0e5 f0cdf76c
......@@ -131,13 +131,11 @@ stack. Driver should not change behaviour based on them.
 * LLTX driver (deprecated for hardware drivers)
-NETIF_F_LLTX should be set in drivers that implement their own locking in
-transmit path or don't need locking at all (e.g. software tunnels).
-In ndo_start_xmit, it is recommended to use a try_lock and return
-NETDEV_TX_LOCKED when the spin lock fails. The locking should also properly
-protect against other callbacks (the rules you need to find out).
+NETIF_F_LLTX is meant to be used by drivers that don't need locking at all,
+e.g. software tunnels.
-Don't use it for new drivers.
+This is also used in a few legacy drivers that implement their
+own locking, don't use it for new (hardware) drivers.
 * netns-local device
......
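To make the remaining legitimate use concrete: a software device that needs
no TX serialization of its own opts out of netif_tx_lock by setting the flag
at setup time.  A minimal sketch with hypothetical names (footunnel_setup,
footunnel_netdev_ops); in-tree software devices such as loopback and veth do
the same thing:

	static const struct net_device_ops footunnel_netdev_ops = {
		/* .ndo_start_xmit = footunnel_xmit, etc. (elided) */
	};

	static void footunnel_setup(struct net_device *dev)
	{
		dev->netdev_ops = &footunnel_netdev_ops;
		/* ndo_start_xmit will be called without netif_tx_lock held */
		dev->features |= NETIF_F_LLTX;
	}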
......@@ -69,10 +69,9 @@ ndo_start_xmit:
 	When the driver sets NETIF_F_LLTX in dev->features this will be
 	called without holding netif_tx_lock. In this case the driver
-	has to lock by itself when needed. It is recommended to use a try lock
-	for this and return NETDEV_TX_LOCKED when the spin lock fails.
+	has to lock by itself when needed.
 	The locking there should also properly protect against
-	set_rx_mode. Note that the use of NETIF_F_LLTX is deprecated.
+	set_rx_mode. WARNING: use of NETIF_F_LLTX is deprecated.
 	Don't use it for new drivers.
 	Context: Process with BHs disabled or BH (timer),
......@@ -83,8 +82,6 @@ ndo_start_xmit:
 	o NETDEV_TX_BUSY Cannot transmit packet, try later
 	  Usually a bug, means queue start/stop flow control is broken in
 	  the driver. Note: the driver must NOT put the skb in its DMA ring.
-	o NETDEV_TX_LOCKED Locking failed, please retry quickly.
-	  Only valid when NETIF_F_LLTX is set.
 ndo_tx_timeout:
 	Synchronization: netif_tx_lock spinlock; all TX queues frozen.
......
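With NETDEV_TX_LOCKED gone, only the two return codes above remain.  The
skeleton below (hypothetical names; bar_ring_full() and bar_queue_to_hw()
are stand-ins) shows the expected pattern: stop the queue while the ring is
full so that NETDEV_TX_BUSY stays the rare, this-is-a-bug path:

	static netdev_tx_t bar_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct bar_priv *priv = netdev_priv(dev);

		if (unlikely(bar_ring_full(priv))) {
			/* only reachable if queue start/stop flow control is broken */
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}

		bar_queue_to_hw(priv, skb);
		if (bar_ring_full(priv))
			netif_stop_queue(dev);	/* TX completion wakes the queue */
		return NETDEV_TX_OK;
	}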
......@@ -356,7 +356,7 @@ static int nes_netdev_stop(struct net_device *netdev)
 /**
  * nes_nic_send
  */
-static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
......@@ -413,7 +413,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 			  netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
 		kfree_skb(skb);
 		nesvnic->tx_sw_dropped++;
-		return NETDEV_TX_LOCKED;
+		return false;
 	}
 	set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
 	bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
......@@ -454,8 +454,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 	set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
 	nesnic->sq_head++;
 	nesnic->sq_head &= nesnic->sq_size - 1;
-	return NETDEV_TX_OK;
+	return true;
 }
......@@ -673,13 +672,11 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			skb_linearize(skb);
 			skb_set_transport_header(skb, hoffset);
 			skb_set_network_header(skb, nhoffset);
-			send_rc = nes_nic_send(skb, netdev);
-			if (send_rc != NETDEV_TX_OK)
+			if (!nes_nic_send(skb, netdev))
 				return NETDEV_TX_OK;
 		}
 	} else {
-		send_rc = nes_nic_send(skb, netdev);
-		if (send_rc != NETDEV_TX_OK)
+		if (!nes_nic_send(skb, netdev))
 			return NETDEV_TX_OK;
 	}
......
......@@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	static int outs;
 	unsigned long flags;
-	if (!TX_BUFFS_AVAIL)
-		return NETDEV_TX_LOCKED;
 	netif_stop_queue(dev);
+	if (!TX_BUFFS_AVAIL) {
+		dev_consume_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
 	skblen = skb->len;
 #ifdef DEBUG_DRIVER
......
......@@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 	local_irq_save(flags);
-	if (!lance_tx_buffs_avail(lp)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	if (!lance_tx_buffs_avail(lp))
+		goto out_free;
 #ifdef DEBUG
 	/* dump the packet */
......@@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 	/* Kick the lance: transmit now */
 	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+out_free:
 	dev_kfree_skb(skb);
 	local_irq_restore(flags);
......
......@@ -488,7 +488,7 @@ struct atl1c_tpd_ring {
 	dma_addr_t dma;		/* descriptor ring physical address */
 	u16 size;		/* descriptor ring length in bytes */
 	u16 count;		/* number of descriptors in the ring */
-	u16 next_to_use;	/* this is protectd by adapter->tx_lock */
+	u16 next_to_use;
 	atomic_t next_to_clean;
 	struct atl1c_buffer *buffer_info;
 };
......@@ -542,7 +542,6 @@ struct atl1c_adapter {
 	u16 link_duplex;
 	spinlock_t mdio_lock;
-	spinlock_t tx_lock;
 	atomic_t irq_sem;
 	struct work_struct common_task;
......
......@@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
 	atl1c_set_rxbufsize(adapter, adapter->netdev);
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->mdio_lock);
-	spin_lock_init(&adapter->tx_lock);
 	set_bit(__AT_DOWN, &adapter->flags);
 	return 0;
......@@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 					struct net_device *netdev)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
-	unsigned long flags;
 	u16 tpd_req = 1;
 	struct atl1c_tpd_desc *tpd;
 	enum atl1c_trans_queue type = atl1c_trans_normal;
......@@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 	}
 	tpd_req = atl1c_cal_tpd_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-		if (netif_msg_pktdata(adapter))
-			dev_info(&adapter->pdev->dev, "tx locked\n");
-		return NETDEV_TX_LOCKED;
-	}
 	if (atl1c_tpd_avail(adapter, type) < tpd_req) {
 		/* no enough descriptor, just stop queue */
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
......@@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 	/* do TSO and check sum */
 	if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
......@@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
"tx-skb droppted due to dma error\n");
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
return NETDEV_TX_OK;
......
......@@ -442,7 +442,6 @@ struct atl1e_adapter {
 	u16 link_duplex;
 	spinlock_t mdio_lock;
-	spinlock_t tx_lock;
 	atomic_t irq_sem;
 	struct work_struct reset_task;
......
......@@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter)
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->mdio_lock);
-	spin_lock_init(&adapter->tx_lock);
 	set_bit(__AT_DOWN, &adapter->flags);
......@@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 				   struct net_device *netdev)
 {
 	struct atl1e_adapter *adapter = netdev_priv(netdev);
-	unsigned long flags;
 	u16 tpd_req = 1;
 	struct atl1e_tpd_desc *tpd;
......@@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 	tpd_req = atl1e_cal_tdp_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-		return NETDEV_TX_LOCKED;
 	if (atl1e_tpd_avail(adapter) < tpd_req) {
 		/* no enough descriptor, just stop queue */
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
......@@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 	/* do TSO and check sum */
 	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
......@@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 	}
 	atl1e_tx_queue(adapter, tpd_req, tpd);
-	netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
-out:
-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
......@@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
 	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
 			      NETIF_F_HW_VLAN_CTAG_RX;
-	netdev->features = netdev->hw_features | NETIF_F_LLTX |
-			   NETIF_F_HW_VLAN_CTAG_TX;
+	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
 	/* not enabled by default */
 	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
 	return 0;
......
......@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	struct cmdQ *q = &sge->cmdQ[qid];
 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
-	if (!spin_trylock(&q->lock))
-		return NETDEV_TX_LOCKED;
+	spin_lock(&q->lock);
 	reclaim_completed_tx(sge, q);
......
......@@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 	netif_stop_queue(dev);
 	if (!lp->tx_enable)			/* Cannot send for now */
-		return NETDEV_TX_LOCKED;
+		goto tx_err;
 	/*
 	** Clean out the TX ring asynchronously to interrupts - sometimes the
......@@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 	/* Test if cache is already locked - requeue skb if so */
 	if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
-		return NETDEV_TX_LOCKED;
+		goto tx_err;
 	/* Transmit descriptor ring full or stale skb */
 	if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
......@@ -1518,6 +1518,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 	lp->cache.lock = 0;
 	return NETDEV_TX_OK;
+tx_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
......
......@@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags = 0;
 	u16 vlan_tag = 0;
 	struct fifo_info *fifo = NULL;
-	int do_spin_lock = 1;
 	int offload_type;
 	int enable_per_list_interrupt = 0;
 	struct config_param *config = &sp->config;
......@@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 				queue += sp->udp_fifo_idx;
 				if (skb->len > 1024)
 					enable_per_list_interrupt = 1;
-				do_spin_lock = 0;
 			}
 		}
 	}
......@@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 			[skb->priority & (MAX_TX_FIFOS - 1)];
 	fifo = &mac_control->fifos[queue];
-	if (do_spin_lock)
-		spin_lock_irqsave(&fifo->tx_lock, flags);
-	else {
-		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-			return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&fifo->tx_lock, flags);
 	if (sp->config.multiq) {
 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
......
......@@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 	unsigned long flags;
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&tx_ring->tx_lock, flags);
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
......
......@@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
  * o NETDEV_TX_BUSY Cannot transmit packet, try later
  *   Usually a bug, means queue start/stop flow control is broken in
  *   the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
  */
 static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 				   struct net_device *ndev)
......@@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 	ENTER;
 	local_irq_save(flags);
-	if (!spin_trylock(&priv->tx_lock)) {
-		local_irq_restore(flags);
-		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-		    BDX_DRV_NAME, ndev->name);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock(&priv->tx_lock);
 	/* build tx descriptor */
 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
......
......@@ -780,8 +780,10 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	if (bc->skb)
-		return NETDEV_TX_LOCKED;
+	if (bc->skb) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 	/* strip KISS byte */
 	if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
 		dev_kfree_skb(skb);
......
......@@ -412,8 +412,10 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	if (sm->skb)
-		return NETDEV_TX_LOCKED;
+	if (sm->skb) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 	netif_stop_queue(dev);
 	sm->skb = skb;
 	return NETDEV_TX_OK;
......
......@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned long flags;
 	int add_num = 1;
-	local_irq_save(flags);
-	if (!spin_trylock(&rnet->tx_lock)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&rnet->tx_lock, flags);
 	if (is_multicast_ether_addr(eth->h_dest))
 		add_num = nets[rnet->mport->id].nact;
......
......@@ -106,7 +106,6 @@ enum netdev_tx {
 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
-	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
 };
 typedef enum netdev_tx netdev_tx_t;
......@@ -831,7 +830,6 @@ struct tc_to_netdev {
  *	the queue before that can happen; it's for obsolete devices and weird
  *	corner cases, but the stack really does a non-trivial amount
  *	of useless work if you return NETDEV_TX_BUSY.
- *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required; cannot be NULL.
  *
  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
......@@ -2737,7 +2735,6 @@ struct softnet_data {
 	/* stats */
 	unsigned int		processed;
 	unsigned int		time_squeeze;
-	unsigned int		cpu_collision;
 	unsigned int		received_rps;
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
......
......@@ -162,7 +162,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
"%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
sd->cpu_collision, sd->received_rps, flow_limit_count);
0, /* was cpu_collision */
sd->received_rps, flow_limit_count);
return 0;
}
......
......@@ -3472,7 +3472,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 			  pkt_dev->odevname, ret);
 		pkt_dev->errors++;
 		/* fallthru */
-	case NETDEV_TX_LOCKED:
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
 		atomic_dec(&(pkt_dev->skb->users));
......
......@@ -108,35 +108,6 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 	return skb;
 }
-static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct netdev_queue *dev_queue,
-					   struct Qdisc *q)
-{
-	int ret;
-	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
-		/*
-		 * Same CPU holding the lock. It may be a transient
-		 * configuration error, when hard_start_xmit() recurses. We
-		 * detect it by checking xmit owner and drop the packet when
-		 * deadloop is detected. Return OK to try the next skb.
-		 */
-		kfree_skb_list(skb);
-		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
-				     dev_queue->dev->name);
-		ret = qdisc_qlen(q);
-	} else {
-		/*
-		 * Another cpu is holding lock, requeue & delay xmits for
-		 * some time.
-		 */
-		__this_cpu_inc(softnet_data.cpu_collision);
-		ret = dev_requeue_skb(skb, q);
-	}
-	return ret;
-}
 /*
  * Transmit possibly several skbs, and handle the return status as
  * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
......@@ -174,9 +145,6 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	if (dev_xmit_complete(ret)) {
 		/* Driver sent out skb successfully or skb was consumed */
 		ret = qdisc_qlen(q);
-	} else if (ret == NETDEV_TX_LOCKED) {
-		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, txq, q);
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely(ret != NETDEV_TX_BUSY))
......