Commit 2e27afb3 authored by Linus Torvalds

Revert "[NET]: Fix races in net_rx_action vs netpoll."

This reverts commit 29578624.

Ingo Molnar reports complete breakage with his e1000 card (no
networking, card reports transmit timeouts), and bisected it down to
this commit.  Let's figure out what went wrong, but not keep breaking
machines until we do.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Olaf Kirch <olaf.kirch@oracle.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c2dc1ad5
@@ -262,8 +262,6 @@ enum netdev_state_t
         __LINK_STATE_LINKWATCH_PENDING,
         __LINK_STATE_DORMANT,
         __LINK_STATE_QDISC_RUNNING,
-        /* Set by the netpoll NAPI code */
-        __LINK_STATE_POLL_LIST_FROZEN,
 };
@@ -1022,14 +1020,6 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
         unsigned long flags;
 
-#ifdef CONFIG_NETPOLL
-        /* Prevent race with netpoll - yes, this is a kludge.
-         * But at least it doesn't penalize the non-netpoll
-         * code path. */
-        if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
-                return;
-#endif
         local_irq_save(flags);
         __netif_rx_complete(dev);
         local_irq_restore(flags);
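For reference, once this revert is applied netif_rx_complete() goes back to unconditionally completing NAPI for the device, whichever CPU calls it. The shape below is a sketch pieced together from the context lines of the hunk above, not a verbatim copy of the header:

static inline void netif_rx_complete(struct net_device *dev)
{
        unsigned long flags;

        /* No netpoll freeze check any more: always pull the device
         * off the per-CPU poll list with interrupts disabled. */
        local_irq_save(flags);
        __netif_rx_complete(dev);
        local_irq_restore(flags);
}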
@@ -124,13 +124,6 @@ static void poll_napi(struct netpoll *np)
         if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
             npinfo->poll_owner != smp_processor_id() &&
             spin_trylock(&npinfo->poll_lock)) {
-                /* When calling dev->poll from poll_napi, we may end up in
-                 * netif_rx_complete. However, only the CPU to which the
-                 * device was queued is allowed to remove it from poll_list.
-                 * Setting POLL_LIST_FROZEN tells netif_rx_complete
-                 * to leave the NAPI state alone.
-                 */
-                set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
                 npinfo->rx_flags |= NETPOLL_RX_DROP;
                 atomic_inc(&trapped);
@@ -138,7 +131,6 @@ static void poll_napi(struct netpoll *np)
                 atomic_dec(&trapped);
                 npinfo->rx_flags &= ~NETPOLL_RX_DROP;
-                clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
                 spin_unlock(&npinfo->poll_lock);
         }
 }
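Read together, the two netpoll.c hunks remove the freeze/unfreeze protocol around the poll call. Below is a rough sketch of the sequence being taken out, assembled only from the removed lines above; the dev->poll() invocation itself sits between the two hunks and is not shown in this diff:

if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
    npinfo->poll_owner != smp_processor_id() &&
    spin_trylock(&npinfo->poll_lock)) {
        /* Freeze: dev->poll() may end up in netif_rx_complete(), but only
         * the CPU that queued the device may unlink it from its per-CPU
         * poll_list, so the flag told netif_rx_complete() to leave the
         * NAPI state alone. */
        set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);

        /* ... dev->poll() runs here (elided between the two hunks) ... */

        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;
        /* Unfreeze before releasing the poll lock. */
        clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
        spin_unlock(&npinfo->poll_lock);
}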