Commit 4871953c authored by Dongdong Deng, committed by David S. Miller

drivers/net: fixed drivers that support netpoll use ndo_start_xmit()

The NETPOLL API requires that interrupts remain disabled in
netpoll_send_skb(). Using the functions in set A inside the NETPOLL
API callbacks re-enables interrupts, which can lead to kernel
instability.

The solution is to use the functions in set B, which save and restore
the irq state and so prevent interrupts from being enabled while in
netpoll_send_skb(); a minimal sketch of the resulting pattern follows
the two lists below.

Function set A:
local_irq_disable()/local_irq_enable()
spin_lock_irq()/spin_unlock_irq()
spin_trylock_irq()/spin_unlock_irq()

Function set B:
local_irq_save()/local_irq_restore()
spin_lock_irqsave()/spin_unlock_irqrestore()
spin_trylock_irqsave()/spin_unlock_irqrestore()
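
For illustration only, here is a minimal sketch of an ndo_start_xmit handler
written with the set-B primitives. The driver type and lock name (my_priv,
tx_lock) are hypothetical; only the save/restore locking pattern reflects the
changes made by this commit.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Hypothetical private state; only the lock matters for this example. */
struct my_priv {
	spinlock_t tx_lock;
};

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* Set B: save the current irq state rather than assuming irqs are
	 * on, since netpoll_send_skb() may call this with irqs disabled. */
	spin_lock_irqsave(&priv->tx_lock, flags);

	/* ... post skb to the hardware tx ring here (omitted) ... */
	dev->trans_start = jiffies;

	/* Set B: restore whatever irq state the caller had, so netpoll's
	 * "interrupts stay disabled" requirement is not violated. */
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}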
Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 79b1bee8
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
+	unsigned long flags;
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 	dev->trans_start = jiffies;
 
 	bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ixpdev_priv *ip = netdev_priv(dev);
 	struct ixpdev_tx_desc *desc;
 	int entry;
+	unsigned long flags;
 
 	if (unlikely(skb->len > PAGE_SIZE)) {
 		/* @@@ Count drops. */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	ip->tx_queue_entries++;
 	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
 		netif_stop_queue(dev);
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t mapping;
 	unsigned int len, entry;
 	u32 ctrl;
+	unsigned long flags;
 
 #ifdef DEBUG
 	int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(bp) < 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		dev_err(&bp->pdev->dev,
 			"BUG! Tx Ring full when queue awake!\n");
 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	dev->trans_start = jiffies;
 
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 {
 	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+	unsigned long flags;
 
 	/* If we don't have a pending timer, set one up to catch our recent
 	   post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irq(&ring->comp_lock)) {
+		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irq(&ring->comp_lock);
+			spin_unlock_irqrestore(&ring->comp_lock, flags);
 		}
 }