Commit 110d3047 authored by Eric Dumazet, committed by Jakub Kicinski

vxlan: use exit_batch_rtnl() method

exit_batch_rtnl() is called while RTNL is held,
and devices to be unregistered can be queued in the dev_kill_list.

This saves one rtnl_lock()/rtnl_unlock() pair per netns
and one unregister_netdevice_many() call.
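
For context, the batching works roughly like the sketch below. This is a
simplified rendition of the cleanup_net() flow in net/core/net_namespace.c,
not the verbatim core code; the list names and loop structure are
approximations:

	/* Sketch: how the netns core drives exit_batch_rtnl().
	 * Every handler queues its dying devices on one shared list, so
	 * the core pays for a single RTNL section and a single
	 * unregister_netdevice_many() call for the whole batch.
	 */
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();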

v4 (Paolo's feedback: https://netdev-3.bots.linux.dev/vmksft-net/results/453141/17-udpgro-fwd-sh/stdout):
  - Changed vxlan_destroy_tunnels() to use vxlan_dellink()
    instead of unregister_netdevice_queue() to properly remove
    devices from vn->vxlan_list (see the sketch after this list).
  - vxlan_destroy_tunnels() can simply iterate a single list
    (vn->vxlan_list), which is the most efficient way to find
    all devices.
  - Moved the sanity checks into a separate vxlan_exit_net() method.
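
As noted above, vxlan_dellink() both unlinks the device from vn->vxlan_list
and queues it for batched unregistration, which a bare
unregister_netdevice_queue() call would not do. A rough sketch of the
relevant steps (vxlan_flush() and the vxlan->next linkage come from the
vxlan driver; this is not the verbatim function body):

	/* Sketch of the parts of vxlan_dellink() that matter here. */
	static void vxlan_dellink(struct net_device *dev, struct list_head *head)
	{
		struct vxlan_dev *vxlan = netdev_priv(dev);

		vxlan_flush(vxlan, true);               /* drop FDB entries */
		list_del(&vxlan->next);                 /* unlink from vn->vxlan_list */
		unregister_netdevice_queue(dev, head);  /* queue on dev_to_kill */
	}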
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Antoine Tenart <atenart@kernel.org>
Link: https://lore.kernel.org/r/20240206144313.2050392-10-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 70f16ea2
@@ -4826,55 +4826,43 @@ static __net_init int vxlan_init_net(struct net *net)
 					    NULL);
 }
 
-static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
+					     struct list_head *dev_to_kill)
 {
-	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_dev *vxlan, *next;
-	struct net_device *dev, *aux;
-
-	for_each_netdev_safe(net, dev, aux)
-		if (dev->rtnl_link_ops == &vxlan_link_ops)
-			unregister_netdevice_queue(dev, head);
 
-	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
-		/* If vxlan->dev is in the same netns, it has already been added
-		 * to the list by the previous loop.
-		 */
-		if (!net_eq(dev_net(vxlan->dev), net))
-			unregister_netdevice_queue(vxlan->dev, head);
-	}
+	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
+		vxlan_dellink(vxlan->dev, dev_to_kill);
 }
 
-static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
+					     struct list_head *dev_to_kill)
 {
 	struct net *net;
-	LIST_HEAD(list);
-	unsigned int h;
 
+	ASSERT_RTNL();
 	list_for_each_entry(net, net_list, exit_list) {
 		struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 
-		unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
-	}
-	rtnl_lock();
-	list_for_each_entry(net, net_list, exit_list)
-		vxlan_destroy_tunnels(net, &list);
-
-	unregister_netdevice_many(&list);
-	rtnl_unlock();
+		__unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
 
-	list_for_each_entry(net, net_list, exit_list) {
-		struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+		vxlan_destroy_tunnels(vn, dev_to_kill);
+	}
+}
 
-		for (h = 0; h < PORT_HASH_SIZE; ++h)
-			WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
-	}
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+	unsigned int h;
+
+	for (h = 0; h < PORT_HASH_SIZE; ++h)
+		WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
 }
 
 static struct pernet_operations vxlan_net_ops = {
 	.init = vxlan_init_net,
-	.exit_batch = vxlan_exit_batch_net,
+	.exit_batch_rtnl = vxlan_exit_batch_rtnl,
+	.exit = vxlan_exit_net,
 	.id = &vxlan_net_id,
 	.size = sizeof(struct vxlan_net),
 };