Commit 57b61127 authored by Haishuang Yan's avatar Haishuang Yan Committed by David S. Miller

vxlan: speedup vxlan tunnels dismantle

Since we now hold the RTNL lock in vxlan_exit_net, it's better to batch the
unregistrations to speed up the dismantling of vxlan tunnels.
Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 41b0cd36
...@@ -3692,18 +3692,16 @@ static __net_init int vxlan_init_net(struct net *net) ...@@ -3692,18 +3692,16 @@ static __net_init int vxlan_init_net(struct net *net)
return 0; return 0;
} }
static void __net_exit vxlan_exit_net(struct net *net) static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
{ {
struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next; struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux; struct net_device *dev, *aux;
unsigned int h; unsigned int h;
LIST_HEAD(list);
rtnl_lock();
for_each_netdev_safe(net, dev, aux) for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops) if (dev->rtnl_link_ops == &vxlan_link_ops)
unregister_netdevice_queue(dev, &list); unregister_netdevice_queue(dev, head);
list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
/* If vxlan->dev is in the same netns, it has already been added /* If vxlan->dev is in the same netns, it has already been added
...@@ -3711,20 +3709,30 @@ static void __net_exit vxlan_exit_net(struct net *net) ...@@ -3711,20 +3709,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
*/ */
if (!net_eq(dev_net(vxlan->dev), net)) { if (!net_eq(dev_net(vxlan->dev), net)) {
gro_cells_destroy(&vxlan->gro_cells); gro_cells_destroy(&vxlan->gro_cells);
unregister_netdevice_queue(vxlan->dev, &list); unregister_netdevice_queue(vxlan->dev, head);
} }
} }
unregister_netdevice_many(&list);
rtnl_unlock();
for (h = 0; h < PORT_HASH_SIZE; ++h) for (h = 0; h < PORT_HASH_SIZE; ++h)
WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
} }
/* Batched pernet exit handler: collect the vxlan devices of every dying
 * netns on one list and unregister them all under a single RTNL hold,
 * which is much faster than per-netns teardown.
 */
static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		vxlan_destroy_tunnels(net, &kill_list);

	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = { static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net, .init = vxlan_init_net,
.exit = vxlan_exit_net, .exit_batch = vxlan_exit_batch_net,
.id = &vxlan_net_id, .id = &vxlan_net_id,
.size = sizeof(struct vxlan_net), .size = sizeof(struct vxlan_net),
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment