Commit 38e6bc18 authored by Amerigo Wang, committed by David S. Miller

netpoll: make __netpoll_cleanup non-block

Like the previous patch, slave_disable_netpoll() and __netpoll_cleanup()
may also be called with read_lock() held, so we should make them
non-blocking by moving the cleanup and kfree() into call_rcu_bh() callbacks.

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 47be03a2
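
Note: the change adopts the standard RCU deferred-free pattern. Instead of blocking in synchronize_rcu_bh() and then freeing inline, the object embeds a struct rcu_head and is handed to call_rcu_bh(); the callback runs after a BH grace period, in softirq context. A minimal kernel-style sketch of that pattern, using hypothetical names (struct foo, foo_free_rcu) that are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* embedded so the callback can recover the object */
};

/* Runs in softirq context after a BH grace period; must not sleep. */
static void foo_rcu_free(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

/* Safe to call with read_lock() held: it only queues a callback. */
static void foo_free_rcu(struct foo *f)
{
	call_rcu_bh(&f->rcu, foo_rcu_free);
}

Because the callback executes in softirq context it must not sleep, which is why the new rcu_cleanup_netpoll_info() below uses cancel_delayed_work() plus a second queue purge instead of cancel_delayed_work_sync().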
@@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
 		return;
 
 	slave->np = NULL;
-	synchronize_rcu_bh();
-	__netpoll_cleanup(np);
-	kfree(np);
+	__netpoll_free_rcu(np);
 }
 
 static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
 {
......
@@ -23,6 +23,7 @@ struct netpoll {
 	u8 remote_mac[ETH_ALEN];
 
 	struct list_head rx; /* rx_np list element */
+	struct rcu_head rcu;
 };
 
 struct netpoll_info {
@@ -38,6 +39,7 @@ struct netpoll_info {
 	struct delayed_work tx_work;
 
 	struct netpoll *netpoll;
+	struct rcu_head rcu;
 };
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
@@ -48,6 +50,7 @@ int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
+void __netpoll_free_rcu(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
......
@@ -704,11 +704,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 
 	info->netpoll = NULL;
 
-	/* Wait for transmitting packets to finish before freeing. */
-	synchronize_rcu_bh();
-
-	__netpoll_cleanup(netpoll);
-	kfree(netpoll);
+	__netpoll_free_rcu(netpoll);
 }
 
 #endif /* CONFIG_NET_POLL_CONTROLLER */
......
@@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
 	p->np = NULL;
 
-	/* Wait for transmitting packets to finish before freeing. */
-	synchronize_rcu_bh();
-
-	__netpoll_cleanup(np);
-	kfree(np);
+	__netpoll_free_rcu(np);
 }
 
 #endif
......
@@ -878,6 +878,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+	struct netpoll_info *npinfo =
+			container_of(rcu_head, struct netpoll_info, rcu);
+
+	skb_queue_purge(&npinfo->arp_tx);
+	skb_queue_purge(&npinfo->txq);
+
+	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
+	cancel_delayed_work(&npinfo->tx_work);
+
+	/* clean after last, unfinished work */
+	__skb_queue_purge(&npinfo->txq);
+	/* now cancel it again */
+	cancel_delayed_work(&npinfo->tx_work);
+	kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -903,20 +921,24 @@ void __netpoll_cleanup(struct netpoll *np)
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
-
-		/* avoid racing with NAPI reading npinfo */
-		synchronize_rcu_bh();
-
-		skb_queue_purge(&npinfo->arp_tx);
-		skb_queue_purge(&npinfo->txq);
-		cancel_delayed_work_sync(&npinfo->tx_work);
-
-		/* clean after last, unfinished work */
-		__skb_queue_purge(&npinfo->txq);
-		kfree(npinfo);
+		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	}
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
+
+	__netpoll_cleanup(np);
+	kfree(np);
+}
+
+void __netpoll_free_rcu(struct netpoll *np)
+{
+	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+}
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
+
 void netpoll_cleanup(struct netpoll *np)
 {
......
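
For reference, a hypothetical out-of-tree caller converts the same way as the bonding, vlan, and bridge hunks above. Assuming a struct bar_port with a struct netpoll *np member (names are illustrative only, not from this patch):

static void bar_disable_netpoll(struct bar_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;

	port->np = NULL;
	/* replaces synchronize_rcu_bh() + __netpoll_cleanup() + kfree() */
	__netpoll_free_rcu(np);
}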