Commit 8ca712c3 authored by David S. Miller

Merge branch 'net-speedup-netns-create-delete-time'

Eric Dumazet says:

====================
net: speedup netns create/delete time

When the rate of netns creation/deletion is high enough,
we observe softlockups in cleanup_net(), caused by a huge list
of netns to dismantle and far too many rcu_barrier() calls.

This patch series makes some optimizations in kobject,
and adds batching to the tunnel drivers so that netns
dismantles are less costly.
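
To illustrate the batching pattern the series applies to every tunnel
driver below, here is a minimal sketch (hypothetical foo_* names, not
code from the patches): a per-netns .exit handler becomes an
.exit_batch handler that receives the whole list of dying namespaces,
so rtnl_lock() and the expensive unregister_netdevice_many() (with its
RCU synchronization) run once per batch instead of once per netns.

/* Sketch only: foo_* names are hypothetical. */
static void __net_exit foo_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();				/* taken once for the whole batch */
	list_for_each_entry(net, net_list, exit_list)
		foo_queue_tunnels(net, &list);	/* hypothetical: queue this netns' devices */
	unregister_netdevice_many(&list);	/* one RCU grace-period cycle for all */
	rtnl_unlock();
}

static struct pernet_operations foo_net_ops = {
	.init		= foo_init_net,
	.exit_batch	= foo_exit_batch_net,	/* replaces a per-netns .exit */
};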

IPv6 addrlabels also get a per-netns list, and tcp_metrics
also benefits from batch flushing.
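
The addrlabel conversion follows a second pattern worth calling out: a
global table whose entries were tagged with their netns becomes a table
embedded in struct net, so teardown drops only its own entries instead
of scanning (and net_eq()-filtering) a shared list under a global lock.
A rough sketch with hypothetical bar_* names:

/* Sketch only: bar_* names are hypothetical; the patches do this for
 * ip6addrlbl_table by embedding it in struct netns_ipv6. */
struct bar_table {
	struct hlist_head head;		/* entries of this netns only */
	spinlock_t lock;
	u32 seq;
};

static void __net_exit bar_net_exit(struct net *net)
{
	struct bar_entry *p;		/* hypothetical entry type with hlist node "list" */
	struct hlist_node *n;

	spin_lock(&net->bar.lock);	/* assumes a struct bar_table bar inside struct net */
	hlist_for_each_entry_safe(p, n, &net->bar.head, list)
		hlist_del_rcu(&p->list);	/* no net_eq() check needed */
	spin_unlock(&net->bar.lock);
}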

This gives me one order of magnitude gain:
~50 ms -> ~5 ms per netns create/delete pair (the test below runs
40 x 100 = 4,000 pairs, hence ~205 s before vs ~22 s after).

Tested:

for i in `seq 1 40`
do
 (for j in `seq 1 100` ; do  unshare -n /bin/true >/dev/null ; done) &
done
wait ; grep net_namespace /proc/slabinfo

Before patch series:

$ time ./add_del_unshare.sh
net_namespace        116    258   5504    1    2 : tunables    8    4    0 : slabdata    116    258      0

real	3m24.910s
user	0m0.747s
sys	0m43.162s

After:
$ time ./add_del_unshare.sh
net_namespace        135    291   5504    1    2 : tunables    8    4    0 : slabdata    135    291      0

real	0m22.117s
user	0m0.728s
sys	0m35.328s
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 752fbcc3 64bc1781
@@ -258,7 +258,8 @@ int ip_tunnel_get_iflink(const struct net_device *dev);
 int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
 		       struct rtnl_link_ops *ops, char *devname);
 
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
+void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
+			   struct rtnl_link_ops *ops);
 
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    const struct iphdr *tnl_params, const u8 protocol);
......
@@ -89,6 +89,11 @@ struct netns_ipv6 {
 	atomic_t		fib6_sernum;
 	struct seg6_pernet_data *seg6_data;
 	struct fib_notifier_ops	*notifier_ops;
+	struct {
+		struct hlist_head head;
+		spinlock_t	lock;
+		u32		seq;
+	} ip6addrlbl_table;
 };
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
......
@@ -294,6 +294,55 @@ static void cleanup_uevent_env(struct subprocess_info *info)
 }
 #endif
 
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+					struct kobj_uevent_env *env,
+					const char *action_string,
+					const char *devpath)
+{
+	int retval = 0;
+#if defined(CONFIG_NET)
+	struct sk_buff *skb = NULL;
+	struct uevent_sock *ue_sk;
+
+	/* send netlink message */
+	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+		struct sock *uevent_sock = ue_sk->sk;
+
+		if (!netlink_has_listeners(uevent_sock, 1))
+			continue;
+
+		if (!skb) {
+			/* allocate message with the maximum possible size */
+			size_t len = strlen(action_string) + strlen(devpath) + 2;
+			char *scratch;
+
+			retval = -ENOMEM;
+			skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+			if (!skb)
+				continue;
+
+			/* add header */
+			scratch = skb_put(skb, len);
+			sprintf(scratch, "%s@%s", action_string, devpath);
+
+			skb_put_data(skb, env->buf, env->buflen);
+
+			NETLINK_CB(skb).dst_group = 1;
+		}
+
+		retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb),
+						    0, 1, GFP_KERNEL,
+						    kobj_bcast_filter,
+						    kobj);
+		/* ENOBUFS should be handled in userspace */
+		if (retval == -ENOBUFS || retval == -ESRCH)
+			retval = 0;
+	}
+	consume_skb(skb);
+#endif
+	return retval;
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -316,9 +365,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	const struct kset_uevent_ops *uevent_ops;
 	int i = 0;
 	int retval = 0;
-#ifdef CONFIG_NET
-	struct uevent_sock *ue_sk;
-#endif
 
 	pr_debug("kobject: '%s' (%p): %s\n",
 		 kobject_name(kobj), kobj, __func__);
@@ -427,46 +473,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		mutex_unlock(&uevent_sock_mutex);
 		goto exit;
 	}
 
-#if defined(CONFIG_NET)
-	/* send netlink message */
-	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
-		struct sock *uevent_sock = ue_sk->sk;
-		struct sk_buff *skb;
-		size_t len;
-
-		if (!netlink_has_listeners(uevent_sock, 1))
-			continue;
-
-		/* allocate message with the maximum possible size */
-		len = strlen(action_string) + strlen(devpath) + 2;
-		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
-		if (skb) {
-			char *scratch;
-
-			/* add header */
-			scratch = skb_put(skb, len);
-			sprintf(scratch, "%s@%s", action_string, devpath);
-
-			/* copy keys to our continuous event payload buffer */
-			for (i = 0; i < env->envp_idx; i++) {
-				len = strlen(env->envp[i]) + 1;
-				scratch = skb_put(skb, len);
-				strcpy(scratch, env->envp[i]);
-			}
-
-			NETLINK_CB(skb).dst_group = 1;
-			retval = netlink_broadcast_filtered(uevent_sock, skb,
-							    0, 1, GFP_KERNEL,
-							    kobj_bcast_filter,
-							    kobj);
-			/* ENOBUFS should be handled in userspace */
-			if (retval == -ENOBUFS || retval == -ESRCH)
-				retval = 0;
-		} else
-			retval = -ENOMEM;
-	}
-#endif
-
+	retval = kobject_uevent_net_broadcast(kobj, env, action_string,
+					      devpath);
+
 	mutex_unlock(&uevent_sock_mutex);
 
 #ifdef CONFIG_UEVENT_HELPER
......
@@ -1013,15 +1013,14 @@ static int __net_init ipgre_init_net(struct net *net)
 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
 }
 
-static void __net_exit ipgre_exit_net(struct net *net)
+static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
 {
-	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
-
-	ip_tunnel_delete_net(itn, &ipgre_link_ops);
+	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
 }
 
 static struct pernet_operations ipgre_net_ops = {
 	.init = ipgre_init_net,
-	.exit = ipgre_exit_net,
+	.exit_batch = ipgre_exit_batch_net,
 	.id   = &ipgre_net_id,
 	.size = sizeof(struct ip_tunnel_net),
 };
@@ -1540,15 +1539,14 @@ static int __net_init ipgre_tap_init_net(struct net *net)
 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
 }
 
-static void __net_exit ipgre_tap_exit_net(struct net *net)
+static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
 {
-	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
-
-	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
+	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
 }
 
 static struct pernet_operations ipgre_tap_net_ops = {
 	.init = ipgre_tap_init_net,
-	.exit = ipgre_tap_exit_net,
+	.exit_batch = ipgre_tap_exit_batch_net,
 	.id   = &gre_tap_net_id,
 	.size = sizeof(struct ip_tunnel_net),
 };
@@ -1559,16 +1557,14 @@ static int __net_init erspan_init_net(struct net *net)
 				  &erspan_link_ops, "erspan0");
 }
 
-static void __net_exit erspan_exit_net(struct net *net)
+static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
 {
-	struct ip_tunnel_net *itn = net_generic(net, erspan_net_id);
-
-	ip_tunnel_delete_net(itn, &erspan_link_ops);
+	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
 }
 
 static struct pernet_operations erspan_net_ops = {
 	.init = erspan_init_net,
-	.exit = erspan_exit_net,
+	.exit_batch = erspan_exit_batch_net,
 	.id   = &erspan_net_id,
 	.size = sizeof(struct ip_tunnel_net),
 };
......
@@ -1061,16 +1061,22 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
 	}
 }
 
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
+void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
+			   struct rtnl_link_ops *ops)
 {
+	struct ip_tunnel_net *itn;
+	struct net *net;
 	LIST_HEAD(list);
 
 	rtnl_lock();
-	ip_tunnel_destroy(itn, &list, ops);
+	list_for_each_entry(net, net_list, exit_list) {
+		itn = net_generic(net, id);
+		ip_tunnel_destroy(itn, &list, ops);
+	}
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
+EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
 
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
 		      struct ip_tunnel_parm *p, __u32 fwmark)
......
@@ -452,15 +452,14 @@ static int __net_init vti_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit vti_exit_net(struct net *net)
+static void __net_exit vti_exit_batch_net(struct list_head *list_net)
 {
-	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-
-	ip_tunnel_delete_net(itn, &vti_link_ops);
+	ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
 }
 
 static struct pernet_operations vti_net_ops = {
 	.init = vti_init_net,
-	.exit = vti_exit_net,
+	.exit_batch = vti_exit_batch_net,
 	.id   = &vti_net_id,
 	.size = sizeof(struct ip_tunnel_net),
 };
......
@@ -634,15 +634,14 @@ static int __net_init ipip_init_net(struct net *net)
 	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
 }
 
-static void __net_exit ipip_exit_net(struct net *net)
+static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
 {
-	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
-
-	ip_tunnel_delete_net(itn, &ipip_link_ops);
+	ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
 }
 
 static struct pernet_operations ipip_net_ops = {
 	.init = ipip_init_net,
-	.exit = ipip_exit_net,
+	.exit_batch = ipip_exit_batch_net,
 	.id   = &ipip_net_id,
 	.size = sizeof(struct ip_tunnel_net),
 };
......
@@ -892,10 +892,14 @@ static void tcp_metrics_flush_all(struct net *net)
 	for (row = 0; row < max_rows; row++, hb++) {
 		struct tcp_metrics_block __rcu **pp;
+		bool match;
+
 		spin_lock_bh(&tcp_metrics_lock);
 		pp = &hb->chain;
 		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
-			if (net_eq(tm_net(tm), net)) {
+			match = net ? net_eq(tm_net(tm), net) :
+				      !atomic_read(&tm_net(tm)->count);
+			if (match) {
 				*pp = tm->tcpm_next;
 				kfree_rcu(tm, rcu_head);
 			} else {
@@ -1018,14 +1022,14 @@ static int __net_init tcp_net_metrics_init(struct net *net)
 	return 0;
 }
 
-static void __net_exit tcp_net_metrics_exit(struct net *net)
+static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
 {
-	tcp_metrics_flush_all(net);
+	tcp_metrics_flush_all(NULL);
 }
 
 static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
-	.init	=	tcp_net_metrics_init,
-	.exit	=	tcp_net_metrics_exit,
+	.init		=	tcp_net_metrics_init,
+	.exit_batch	=	tcp_net_metrics_exit_batch,
 };
 
 void __init tcp_metrics_init(void)
......
@@ -30,7 +30,6 @@
  * Policy Table
  */
 struct ip6addrlbl_entry {
-	possible_net_t lbl_net;
 	struct in6_addr prefix;
 	int prefixlen;
 	int ifindex;
@@ -41,19 +40,6 @@ struct ip6addrlbl_entry {
 	struct rcu_head rcu;
 };
 
-static struct ip6addrlbl_table
-{
-	struct hlist_head head;
-	spinlock_t lock;
-	u32 seq;
-} ip6addrlbl_table;
-
-static inline
-struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
-{
-	return read_pnet(&lbl->lbl_net);
-}
-
 /*
  * Default policy table (RFC6724 + extensions)
  *
@@ -148,13 +134,10 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
 }
 
 /* Find label */
-static bool __ip6addrlbl_match(struct net *net,
-			       const struct ip6addrlbl_entry *p,
+static bool __ip6addrlbl_match(const struct ip6addrlbl_entry *p,
 			       const struct in6_addr *addr,
 			       int addrtype, int ifindex)
 {
-	if (!net_eq(ip6addrlbl_net(p), net))
-		return false;
 	if (p->ifindex && p->ifindex != ifindex)
 		return false;
 	if (p->addrtype && p->addrtype != addrtype)
@@ -169,8 +152,9 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
 					  int type, int ifindex)
 {
 	struct ip6addrlbl_entry *p;
-	hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
-		if (__ip6addrlbl_match(net, p, addr, type, ifindex))
+
+	hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
+		if (__ip6addrlbl_match(p, addr, type, ifindex))
 			return p;
 	}
 	return NULL;
@@ -196,8 +180,7 @@ u32 ipv6_addr_label(struct net *net,
 }
 
 /* allocate one entry */
-static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
-						 const struct in6_addr *prefix,
+static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix,
 						 int prefixlen, int ifindex,
 						 u32 label)
 {
@@ -236,24 +219,23 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
 	newp->addrtype = addrtype;
 	newp->label = label;
 	INIT_HLIST_NODE(&newp->list);
-	write_pnet(&newp->lbl_net, net);
 	refcount_set(&newp->refcnt, 1);
 	return newp;
 }
 
 /* add a label */
-static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
+static int __ip6addrlbl_add(struct net *net, struct ip6addrlbl_entry *newp,
+			    int replace)
 {
-	struct hlist_node *n;
 	struct ip6addrlbl_entry *last = NULL, *p = NULL;
+	struct hlist_node *n;
 	int ret = 0;
 
 	ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
 		  replace);
 
-	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
 		if (p->prefixlen == newp->prefixlen &&
-		    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
 		    p->ifindex == newp->ifindex &&
 		    ipv6_addr_equal(&p->prefix, &newp->prefix)) {
 			if (!replace) {
@@ -273,10 +255,10 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
 	if (last)
 		hlist_add_behind_rcu(&newp->list, &last->list);
 	else
-		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
+		hlist_add_head_rcu(&newp->list, &net->ipv6.ip6addrlbl_table.head);
 out:
 	if (!ret)
-		ip6addrlbl_table.seq++;
+		net->ipv6.ip6addrlbl_table.seq++;
 	return ret;
 }
@@ -292,12 +274,12 @@ static int ip6addrlbl_add(struct net *net,
 		  __func__, prefix, prefixlen, ifindex, (unsigned int)label,
 		  replace);
 
-	newp = ip6addrlbl_alloc(net, prefix, prefixlen, ifindex, label);
+	newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label);
 	if (IS_ERR(newp))
 		return PTR_ERR(newp);
-	spin_lock(&ip6addrlbl_table.lock);
-	ret = __ip6addrlbl_add(newp, replace);
-	spin_unlock(&ip6addrlbl_table.lock);
+	spin_lock(&net->ipv6.ip6addrlbl_table.lock);
+	ret = __ip6addrlbl_add(net, newp, replace);
+	spin_unlock(&net->ipv6.ip6addrlbl_table.lock);
 	if (ret)
 		ip6addrlbl_free(newp);
 	return ret;
@@ -315,9 +297,8 @@ static int __ip6addrlbl_del(struct net *net,
 	ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
 		  __func__, prefix, prefixlen, ifindex);
 
-	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
 		if (p->prefixlen == prefixlen &&
-		    net_eq(ip6addrlbl_net(p), net) &&
 		    p->ifindex == ifindex &&
 		    ipv6_addr_equal(&p->prefix, prefix)) {
 			hlist_del_rcu(&p->list);
@@ -340,9 +321,9 @@ static int ip6addrlbl_del(struct net *net,
 		  __func__, prefix, prefixlen, ifindex);
 
 	ipv6_addr_prefix(&prefix_buf, prefix, prefixlen);
-	spin_lock(&ip6addrlbl_table.lock);
+	spin_lock(&net->ipv6.ip6addrlbl_table.lock);
 	ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex);
-	spin_unlock(&ip6addrlbl_table.lock);
+	spin_unlock(&net->ipv6.ip6addrlbl_table.lock);
 	return ret;
 }
@@ -354,6 +335,9 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
 
 	ADDRLABEL(KERN_DEBUG "%s\n", __func__);
 
+	spin_lock_init(&net->ipv6.ip6addrlbl_table.lock);
+	INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head);
+
 	for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
 		int ret = ip6addrlbl_add(net,
 					 ip6addrlbl_init_table[i].prefix,
@@ -373,14 +357,12 @@ static void __net_exit ip6addrlbl_net_exit(struct net *net)
 	struct hlist_node *n;
 
 	/* Remove all labels belonging to the exiting net */
-	spin_lock(&ip6addrlbl_table.lock);
-	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
-		if (net_eq(ip6addrlbl_net(p), net)) {
-			hlist_del_rcu(&p->list);
-			ip6addrlbl_put(p);
-		}
+	spin_lock(&net->ipv6.ip6addrlbl_table.lock);
+	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
+		hlist_del_rcu(&p->list);
+		ip6addrlbl_put(p);
 	}
-	spin_unlock(&ip6addrlbl_table.lock);
+	spin_unlock(&net->ipv6.ip6addrlbl_table.lock);
 }
 
 static struct pernet_operations ipv6_addr_label_ops = {
@@ -390,8 +372,6 @@ static struct pernet_operations ipv6_addr_label_ops = {
 
 int __init ipv6_addr_label_init(void)
 {
-	spin_lock_init(&ip6addrlbl_table.lock);
-
 	return register_pernet_subsys(&ipv6_addr_label_ops);
 }
@@ -510,11 +490,10 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int err;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
-		if (idx >= s_idx &&
-		    net_eq(ip6addrlbl_net(p), net)) {
+	hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
+		if (idx >= s_idx) {
 			err = ip6addrlbl_fill(skb, p,
-					      ip6addrlbl_table.seq,
+					      net->ipv6.ip6addrlbl_table.seq,
 					      NETLINK_CB(cb->skb).portid,
 					      cb->nlh->nlmsg_seq,
 					      RTM_NEWADDRLABEL,
@@ -571,7 +550,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 	p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
 	if (p && !ip6addrlbl_hold(p))
 		p = NULL;
-	lseq = ip6addrlbl_table.seq;
+	lseq = net->ipv6.ip6addrlbl_table.seq;
 	rcu_read_unlock();
 
 	if (!p) {
......
@@ -1155,19 +1155,21 @@ static int __net_init ip6gre_init_net(struct net *net)
 	return err;
 }
 
-static void __net_exit ip6gre_exit_net(struct net *net)
+static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
 {
+	struct net *net;
 	LIST_HEAD(list);
 
 	rtnl_lock();
-	ip6gre_destroy_tunnels(net, &list);
+	list_for_each_entry(net, net_list, exit_list)
+		ip6gre_destroy_tunnels(net, &list);
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 }
 
 static struct pernet_operations ip6gre_net_ops = {
 	.init = ip6gre_init_net,
-	.exit = ip6gre_exit_net,
+	.exit_batch = ip6gre_exit_batch_net,
 	.id   = &ip6gre_net_id,
 	.size = sizeof(struct ip6gre_net),
 };
......
@@ -2167,17 +2167,16 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
 	.priority	=	1,
 };
 
-static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
+static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
 {
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct net_device *dev, *aux;
 	int h;
 	struct ip6_tnl *t;
-	LIST_HEAD(list);
 
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &ip6_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, list);
 
 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
@@ -2186,12 +2185,10 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
 			 * been added to the list by the previous loop.
 			 */
 			if (!net_eq(dev_net(t->dev), net))
-				unregister_netdevice_queue(t->dev, &list);
+				unregister_netdevice_queue(t->dev, list);
 			t = rtnl_dereference(t->next);
 		}
 	}
-
-	unregister_netdevice_many(&list);
 }
 
 static int __net_init ip6_tnl_init_net(struct net *net)
@@ -2235,16 +2232,21 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 	return err;
 }
 
-static void __net_exit ip6_tnl_exit_net(struct net *net)
+static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
 {
+	struct net *net;
+	LIST_HEAD(list);
+
 	rtnl_lock();
-	ip6_tnl_destroy_tunnels(net);
+	list_for_each_entry(net, net_list, exit_list)
+		ip6_tnl_destroy_tunnels(net, &list);
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 }
 
 static struct pernet_operations ip6_tnl_net_ops = {
 	.init = ip6_tnl_init_net,
-	.exit = ip6_tnl_exit_net,
+	.exit_batch = ip6_tnl_exit_batch_net,
 	.id   = &ip6_tnl_net_id,
 	.size = sizeof(struct ip6_tnl_net),
 };
......
@@ -1052,23 +1052,22 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
 	.get_link_net	= ip6_tnl_get_link_net,
 };
 
-static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
+static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
+					    struct list_head *list)
 {
 	int h;
 	struct ip6_tnl *t;
-	LIST_HEAD(list);
 
 	for (h = 0; h < IP6_VTI_HASH_SIZE; h++) {
 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
 		while (t) {
-			unregister_netdevice_queue(t->dev, &list);
+			unregister_netdevice_queue(t->dev, list);
 			t = rtnl_dereference(t->next);
 		}
 	}
 
 	t = rtnl_dereference(ip6n->tnls_wc[0]);
-	unregister_netdevice_queue(t->dev, &list);
-	unregister_netdevice_many(&list);
+	unregister_netdevice_queue(t->dev, list);
 }
 
 static int __net_init vti6_init_net(struct net *net)
@@ -1108,18 +1107,24 @@ static int __net_init vti6_init_net(struct net *net)
 	return err;
 }
 
-static void __net_exit vti6_exit_net(struct net *net)
+static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
 {
-	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+	struct vti6_net *ip6n;
+	struct net *net;
+	LIST_HEAD(list);
 
 	rtnl_lock();
-	vti6_destroy_tunnels(ip6n);
+	list_for_each_entry(net, net_list, exit_list) {
+		ip6n = net_generic(net, vti6_net_id);
+		vti6_destroy_tunnels(ip6n, &list);
+	}
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 }
 
 static struct pernet_operations vti6_net_ops = {
 	.init = vti6_init_net,
-	.exit = vti6_exit_net,
+	.exit_batch = vti6_exit_batch_net,
 	.id   = &vti6_net_id,
 	.size = sizeof(struct vti6_net),
 };
......
@@ -1848,19 +1848,22 @@ static int __net_init sit_init_net(struct net *net)
 	return err;
 }
 
-static void __net_exit sit_exit_net(struct net *net)
+static void __net_exit sit_exit_batch_net(struct list_head *net_list)
 {
 	LIST_HEAD(list);
+	struct net *net;
 
 	rtnl_lock();
-	sit_destroy_tunnels(net, &list);
+	list_for_each_entry(net, net_list, exit_list)
+		sit_destroy_tunnels(net, &list);
+
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 }
 
 static struct pernet_operations sit_net_ops = {
 	.init = sit_init_net,
-	.exit = sit_exit_net,
+	.exit_batch = sit_exit_batch_net,
 	.id   = &sit_net_id,
 	.size = sizeof(struct sit_net),
 };
......