Commit 9fd6452d authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: conntrack: rename nf_ct_iterate_cleanup

There are several places where we needlessly call nf_ct_iterate_cleanup;
we should instead iterate the full table once at module unload time.

This is a leftover from back when the conntrack table got duplicated
per net namespace.

So rename nf_ct_iterate_cleanup to nf_ct_iterate_cleanup_net.
A later patch will then add a non-net variant.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent cad43944
@@ -225,9 +225,9 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 			       u32 seq);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
-void nf_ct_iterate_cleanup(struct net *net,
-			   int (*iter)(struct nf_conn *i, void *data),
-			   void *data, u32 portid, int report);
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report);
 
 struct nf_conntrack_zone;
...
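For context, a minimal usage sketch of the renamed per-net iterator (not part of this patch; flush_cb and flush_all_for_net are made-up names): the callback receives each conntrack entry and, per the header comment above, a nonzero return asks the iterator to delete that entry. The kill_all() callback in a later hunk of this patch follows the same pattern.

#include <net/netfilter/nf_conntrack.h>

/* Match every entry; returning nonzero asks the iterator to delete it. */
static int flush_cb(struct nf_conn *ct, void *data)
{
	return 1;
}

/* Hypothetical helper: drop every conntrack entry owned by one netns.
 * portid/report are 0, as in most callers below, so the resulting
 * destroy events are not attributed to a particular netlink requester.
 */
static void flush_all_for_net(struct net *net)
{
	nf_ct_iterate_cleanup_net(net, flush_cb, NULL, 0, 0);
}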
@@ -98,8 +98,8 @@ static int masq_device_event(struct notifier_block *this,
 		 */
 		NF_CT_ASSERT(dev->ifindex != 0);
 
-		nf_ct_iterate_cleanup(net, device_cmp,
-				      (void *)(long)dev->ifindex, 0, 0);
+		nf_ct_iterate_cleanup_net(net, device_cmp,
+					  (void *)(long)dev->ifindex, 0, 0);
 	}
 
 	return NOTIFY_DONE;
...
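device_cmp() itself is outside these hunks. As a rough sketch of what such a callback generally looks like (the NAT extension lookup and the masq_index field are assumptions made for illustration, not shown in this diff), the ifindex arrives through the void *data argument, matching the (void *)(long)dev->ifindex cast above:

#include <net/netfilter/nf_nat.h>

/* Sketch only: delete conntrack entries tied to the downed interface. */
static int device_cmp(struct nf_conn *ct, void *ifindex)
{
	const struct nf_conn_nat *nat = nfct_nat(ct);

	if (!nat)
		return 0;

	/* nonzero return: entry was masqueraded via the downed device */
	return nat->masq_index == (int)(long)ifindex;
}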
@@ -75,8 +75,8 @@ static int masq_device_event(struct notifier_block *this,
 	struct net *net = dev_net(dev);
 
 	if (event == NETDEV_DOWN)
-		nf_ct_iterate_cleanup(net, device_cmp,
-				      (void *)(long)dev->ifindex, 0, 0);
+		nf_ct_iterate_cleanup_net(net, device_cmp,
+					  (void *)(long)dev->ifindex, 0, 0);
 
 	return NOTIFY_DONE;
 }
@@ -99,7 +99,7 @@ static void iterate_cleanup_work(struct work_struct *work)
 	w = container_of(work, struct masq_dev_work, work);
 
 	index = w->ifindex;
-	nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
+	nf_ct_iterate_cleanup_net(w->net, device_cmp, (void *)index, 0, 0);
 
 	put_net(w->net);
 	kfree(w);
@@ -110,12 +110,12 @@ static void iterate_cleanup_work(struct work_struct *work)
 /* ipv6 inet notifier is an atomic notifier, i.e. we cannot
  * schedule.
  *
- * Unfortunately, nf_ct_iterate_cleanup can run for a long
+ * Unfortunately, nf_ct_iterate_cleanup_net can run for a long
  * time if there are lots of conntracks and the system
  * handles high softirq load, so it frequently calls cond_resched
  * while iterating the conntrack table.
  *
- * So we defer nf_ct_iterate_cleanup walk to the system workqueue.
+ * So we defer nf_ct_iterate_cleanup_net walk to the system workqueue.
  *
  * As we can have 'a lot' of inet_events (depending on amount
  * of ipv6 addresses being deleted), we also need to add an upper
...
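The enqueue side of this deferral is not part of the diff. A sketch of the pattern the comment describes follows; masq_schedule_cleanup and the allocation/refcount details are illustrative assumptions, and the upper limit on queued work items mentioned in the comment is omitted. The idea: the atomic inet6 notifier packages the namespace and ifindex into a masq_dev_work item and hands the sleepable conntrack walk to the system workqueue.

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/net_namespace.h>

struct masq_dev_work {		/* matches the container_of() in the hunk above */
	struct work_struct	work;
	struct net		*net;
	int			ifindex;
};

static void iterate_cleanup_work(struct work_struct *work);	/* defined above */

/* Called from atomic (notifier) context: no sleeping allocations,
 * and the conntrack walk itself must be deferred.
 */
static void masq_schedule_cleanup(struct net *net, int ifindex)
{
	struct masq_dev_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	get_net(net);		/* released by put_net() in iterate_cleanup_work() */
	w->net = net;
	w->ifindex = ifindex;
	INIT_WORK(&w->work, iterate_cleanup_work);
	schedule_work(&w->work);
}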
@@ -1634,9 +1634,9 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 	return ct;
 }
 
-void nf_ct_iterate_cleanup(struct net *net,
-			   int (*iter)(struct nf_conn *i, void *data),
-			   void *data, u32 portid, int report)
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report)
 {
 	struct nf_conn *ct;
 	unsigned int bucket = 0;
@@ -1654,7 +1654,7 @@ void nf_ct_iterate_cleanup(struct net *net,
 		cond_resched();
 	}
 }
-EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
 
 static int kill_all(struct nf_conn *i, void *data)
 {
@@ -1723,7 +1723,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
 i_see_dead_people:
 	busy = 0;
 	list_for_each_entry(net, net_exit_list, exit_list) {
-		nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
+		nf_ct_iterate_cleanup_net(net, kill_all, NULL, 0, 0);
 		if (atomic_read(&net->ct.count) != 0)
 			busy = 1;
 	}
...
@@ -1117,8 +1117,8 @@ static int ctnetlink_flush_conntrack(struct net *net,
 			return PTR_ERR(filter);
 	}
 
-	nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
-			      portid, report);
+	nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+				  portid, report);
 	kfree(filter);
 
 	return 0;
...
@@ -282,7 +282,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
 		proto->net_ns_put(net);
 
 	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
+	nf_ct_iterate_cleanup_net(net, kill_l3proto, proto, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
@@ -450,7 +450,7 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
 	nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 
 	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
+	nf_ct_iterate_cleanup_net(net, kill_l4proto, l4proto, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
...
@@ -586,7 +586,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 
 	rtnl_lock();
 	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
+		nf_ct_iterate_cleanup_net(net, nf_nat_proto_remove, &clean, 0, 0);
 	rtnl_unlock();
 }
@@ -600,7 +600,7 @@ static void nf_nat_l3proto_clean(u8 l3proto)
 
 	rtnl_lock();
 	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
+		nf_ct_iterate_cleanup_net(net, nf_nat_proto_remove, &clean, 0, 0);
 	rtnl_unlock();
 }
@@ -826,7 +826,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
-	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
+	nf_ct_iterate_cleanup_net(net, nf_nat_proto_clean, &clean, 0, 0);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
...
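Tying this back to the commit message: the nf_nat hunks above are examples of the needless per-net walks at module unload time. The announced non-net variant is not part of this patch and its real name and signature may differ; purely as an illustration (nf_ct_iterate_cleanup_all is a made-up name, the nf_nat_proto_clean field names are assumed, and the sketch is written as if it lived in the same file as the hunks above), such a helper would let the rtnl_lock()/for_each_net() loop collapse into a single walk of the shared conntrack table:

#include <net/netfilter/nf_conntrack.h>

/* Hypothetical prototype of the announced non-net variant. */
void nf_ct_iterate_cleanup_all(int (*iter)(struct nf_conn *i, void *data),
			       void *data, u32 portid, int report);

/* Sketch of what nf_nat_l4proto_clean() could look like with it. */
static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,	/* field names assumed for illustration */
		.l4proto = l4proto,
	};

	/* one walk over the shared conntrack table instead of one per netns */
	nf_ct_iterate_cleanup_all(nf_nat_proto_remove, &clean, 0, 0);
}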