Commit c9fbd71f authored by Debabrata Banerjee, committed by David S. Miller

netpoll: allow cleanup to be synchronous

This fixes a problem introduced by:
commit 2cde6acd ("netpoll: Fix __netpoll_rcu_free so that it can hold the rtnl lock")

When using netconsole on a bond, __netpoll_cleanup can recurse
asynchronously multiple times: each __netpoll_free_async call can result
in further __netpoll_free_async calls. This means there is now a race
between the cleanup_work queues on multiple netpoll_info's on multiple
devices and the configuration of a new netpoll. For example, if a
netconsole is set to enable 0, reconfigured, and set back to enable 1
immediately, the netconsole will likely not work.

Given that the reason for __netpoll_free_async is that it can be called
when rtnl is not locked, we should be able to clean up synchronously
whenever rtnl is held, and it appears to be held everywhere
__netpoll_free_async is called from.

Generalize the design pattern from the teaming driver for current
callers of __netpoll_free_async.
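
For illustration, the generalized helper and a typical caller look like
the sketch below. __netpoll_free() is lifted from the net/core/netpoll.c
hunk in this patch; the caller (example_disable_netpoll on a hypothetical
struct example_port) only stands in for the per-driver cleanup paths
(bonding, team, macvlan, vlan, bridge, dsa) converted here.

void __netpoll_free(struct netpoll *np)
{
        ASSERT_RTNL();

        /* Wait for transmitting packets to finish before freeing. */
        synchronize_rcu_bh();
        __netpoll_cleanup(np);
        kfree(np);
}

/* Hypothetical caller: detach the pointer under rtnl, then free
 * synchronously instead of scheduling cleanup_work.
 */
static void example_disable_netpoll(struct example_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        __netpoll_free(np);
}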

CC: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Debabrata Banerjee <dbanerje@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2e2d6f03
@@ -963,7 +963,8 @@ static inline void slave_disable_netpoll(struct slave *slave)
                 return;
 
         slave->np = NULL;
-        __netpoll_free_async(np);
+
+        __netpoll_free(np);
 }
 
 static void bond_poll_controller(struct net_device *bond_dev)
@@ -1077,7 +1077,7 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
 
         vlan->netpoll = NULL;
 
-        __netpoll_free_async(netpoll);
+        __netpoll_free(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1104,10 +1104,7 @@ static void team_port_disable_netpoll(struct team_port *port)
                 return;
         port->np = NULL;
 
-        /* Wait for transmitting packets to finish before freeing. */
-        synchronize_rcu_bh();
-        __netpoll_cleanup(np);
-        kfree(np);
+        __netpoll_free(np);
 }
 #else
 static int team_port_enable_netpoll(struct team_port *port)
@@ -31,8 +31,6 @@ struct netpoll {
         bool ipv6;
         u16 local_port, remote_port;
         u8 remote_mac[ETH_ALEN];
-
-        struct work_struct cleanup_work;
 };
 
 struct netpoll_info {
@@ -63,7 +61,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt);
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_async(struct netpoll *np);
+void __netpoll_free(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                              struct net_device *dev);
@@ -756,8 +756,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
                 return;
 
         vlan->netpoll = NULL;
-
-        __netpoll_free_async(netpoll);
+        __netpoll_free(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -344,7 +344,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
         p->np = NULL;
 
-        __netpoll_free_async(np);
+        __netpoll_free(np);
 }
 
 #endif
@@ -57,7 +57,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
          MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -589,7 +588,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 
         np->dev = ndev;
         strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
-        INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
         if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                 np_err(np, "%s doesn't support polling, aborting\n",
@@ -788,10 +786,6 @@ void __netpoll_cleanup(struct netpoll *np)
 {
         struct netpoll_info *npinfo;
 
-        /* rtnl_dereference would be preferable here but
-         * rcu_cleanup_netpoll path can put us in here safely without
-         * holding the rtnl, so plain rcu_dereference it is
-         */
         npinfo = rtnl_dereference(np->dev->npinfo);
         if (!npinfo)
                 return;
@@ -812,21 +806,16 @@ void __netpoll_cleanup(struct netpoll *np)
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void netpoll_async_cleanup(struct work_struct *work)
+void __netpoll_free(struct netpoll *np)
 {
-        struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
+        ASSERT_RTNL();
 
-        rtnl_lock();
+        /* Wait for transmitting packets to finish before freeing. */
+        synchronize_rcu_bh();
         __netpoll_cleanup(np);
-        rtnl_unlock();
         kfree(np);
 }
-
-void __netpoll_free_async(struct netpoll *np)
-{
-        schedule_work(&np->cleanup_work);
-}
-EXPORT_SYMBOL_GPL(__netpoll_free_async);
+EXPORT_SYMBOL_GPL(__netpoll_free);
 
 void netpoll_cleanup(struct netpoll *np)
 {
@@ -722,7 +722,7 @@ static void dsa_slave_netpoll_cleanup(struct net_device *dev)
 
         p->netpoll = NULL;
 
-        __netpoll_free_async(netpoll);
+        __netpoll_free(netpoll);
 }
 
 static void dsa_slave_poll_controller(struct net_device *dev)