Commit 8a75a2c1 authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: conntrack: remove unconfirmed list

It has no function anymore and can be removed.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent ace53fdc
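
Editor's note: the "unconfirmed list" was a per-netns array of per-CPU, spinlock-protected nulls lists holding conntrack entries that had been allocated but not yet confirmed (i.e. not yet inserted into the main hash). Its last reader, the ctnetlink unconfirmed dump, is turned into a no-op in this same patch, so the whole structure can go. Condensed from the hunks below, the bookkeeping being removed had this shape (a sketch, not complete kernel source):

	/* Sketch of the removed bookkeeping, condensed from the hunks below. */
	struct ct_pcpu {
		spinlock_t		lock;
		struct hlist_nulls_head	unconfirmed;	/* not-yet-confirmed entries */
	};

	struct netns_ct {
		/* ... */
		struct ct_pcpu __percpu	*pcpu_lists;	/* one ct_pcpu per possible CPU */
	};

	struct nf_conn {
		/* ... */
		u16 cpu;	/* CPU whose unconfirmed list holds this entry */
	};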
@@ -101,7 +101,6 @@ struct nf_conn {
 	/* Have we seen traffic both ways yet? (bitset) */
 	unsigned long status;
 
-	u16		cpu;
 	possible_net_t ct_net;
 
 #if IS_ENABLED(CONFIG_NF_NAT)
...
@@ -93,11 +93,6 @@ struct nf_ip_net {
 #endif
 };
 
-struct ct_pcpu {
-	spinlock_t		lock;
-	struct hlist_nulls_head	unconfirmed;
-};
-
 struct netns_ct {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	bool ecache_dwork_pending;
@@ -109,7 +104,6 @@ struct netns_ct {
 	u8			sysctl_tstamp;
 	u8			sysctl_checksum;
 
-	struct ct_pcpu __percpu *pcpu_lists;
 	struct ip_conntrack_stat __percpu *stat;
 	struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
 	struct nf_ip_net	nf_ct_proto;
...
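
A side note on the list flavour dropped above: pcpu->unconfirmed is an hlist_nulls list. Its terminator is not a plain NULL but a tagged word carrying a per-list "nulls" value (the init hunk further down passes UNCONFIRMED_NULLS_VAL), which lets a lockless RCU reader detect that an entry migrated to a different chain during traversal and restart its lookup. A minimal sketch of the idiom, with illustrative names and value:

	#include <linux/list_nulls.h>

	#define DEMO_NULLS_VAL	8	/* illustrative; conntrack uses UNCONFIRMED_NULLS_VAL */

	static struct hlist_nulls_head demo_head;

	static void demo_init(void)
	{
		/* The terminator is an odd-tagged word carrying DEMO_NULLS_VAL,
		 * not NULL, so readers can tell which chain they ended on. */
		INIT_HLIST_NULLS_HEAD(&demo_head, DEMO_NULLS_VAL);
	}

	static bool demo_reader_ended_on_own_chain(void)
	{
		struct hlist_nulls_node *n;

		/* A real lockless reader would use the RCU accessors; this
		 * sketch just follows the plain pointers. */
		for (n = demo_head.first; !is_a_nulls(n); n = n->next)
			;

		/* If an entry moved to another chain mid-walk, the nulls value
		 * seen here would differ and the reader would restart. */
		return get_nulls_value(n) == DEMO_NULLS_VAL;
	}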
@@ -525,35 +525,6 @@ clean_from_lists(struct nf_conn *ct)
 	nf_ct_remove_expectations(ct);
 }
 
-/* must be called with local_bh_disable */
-static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
-{
-	struct ct_pcpu *pcpu;
-
-	/* add this conntrack to the (per cpu) unconfirmed list */
-	ct->cpu = smp_processor_id();
-	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
-	spin_lock(&pcpu->lock);
-	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			     &pcpu->unconfirmed);
-	spin_unlock(&pcpu->lock);
-}
-
-/* must be called with local_bh_disable */
-static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct)
-{
-	struct ct_pcpu *pcpu;
-
-	/* We overload first tuple to link into unconfirmed list.*/
-	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
-	spin_lock(&pcpu->lock);
-	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
-	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
-	spin_unlock(&pcpu->lock);
-}
-
 #define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
 
 /* Released via nf_ct_destroy() */
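
Both removed helpers are annotated "must be called with local_bh_disable": they take pcpu->lock with plain spin_lock() rather than spin_lock_bh(), so every caller has to disable bottom halves first, otherwise a softirq arriving on the same CPU could try to take the lock the interrupted context already holds and deadlock. A hedged sketch of that calling convention (names are illustrative):

	#include <linux/spinlock.h>
	#include <linux/bottom_half.h>

	static DEFINE_SPINLOCK(demo_lock);	/* illustrative lock */

	/* must be called with local_bh_disable, like the removed helpers */
	static void demo_touch_list(void)
	{
		spin_lock(&demo_lock);	/* plain spin_lock: BHs already off */
		/* ... link or unlink an entry ... */
		spin_unlock(&demo_lock);
	}

	static void demo_caller(void)
	{
		local_bh_disable();	/* keep softirqs from taking demo_lock under us */
		demo_touch_list();
		local_bh_enable();
	}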
@@ -625,7 +596,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
 	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
 		destroy_gre_conntrack(ct);
 
-	local_bh_disable();
 	/* Expectations will have been removed in clean_from_lists,
 	 * except TFTP can create an expectation on the first packet,
 	 * before connection is in the list, so we need to clean here,
@@ -633,11 +603,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
 	 */
 	nf_ct_remove_expectations(ct);
 
-	if (unlikely(!nf_ct_is_confirmed(ct)))
-		nf_ct_del_from_unconfirmed_list(ct);
-
-	local_bh_enable();
-
 	if (ct->master)
 		nf_ct_put(ct->master);
@@ -1248,7 +1213,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	nf_ct_del_from_unconfirmed_list(ct);
 	ct->status |= IPS_CONFIRMED;
 
 	if (unlikely(nf_ct_is_dying(ct))) {
@@ -1803,9 +1767,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	if (!exp)
 		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
-	/* Now it is inserted into the unconfirmed list, set refcount to 1. */
+	/* Now it is going to be associated with an sk_buff, set refcount to 1. */
 	refcount_set(&ct->ct_general.use, 1);
-	nf_ct_add_to_unconfirmed_list(ct);
 
 	local_bh_enable();
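
The rewritten comment in init_conntrack() states the new ownership model: since a fresh conntrack is no longer published on any list, the single reference set here is owned entirely by the sk_buff the entry is about to be attached to; other CPUs only see it once __nf_conntrack_confirm() inserts it into the hash. Lookups on shared entries then take references with the usual inc-not-zero pattern, roughly like this sketch (not the exact kernel code):

	#include <linux/refcount.h>

	struct demo_entry {
		refcount_t use;
	};

	static void demo_new(struct demo_entry *e)
	{
		/* Sole reference, owned by the skb that triggered allocation. */
		refcount_set(&e->use, 1);
	}

	static bool demo_lookup_get(struct demo_entry *e)
	{
		/* Fails once the last reference is gone, so a racing lookup
		 * can never resurrect an entry that is being freed. */
		return refcount_inc_not_zero(&e->use);
	}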
@@ -2594,7 +2557,6 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
 		nf_conntrack_ecache_pernet_fini(net);
 		nf_conntrack_expect_pernet_fini(net);
 		free_percpu(net->ct.stat);
-		free_percpu(net->ct.pcpu_lists);
 	}
 }
@@ -2805,26 +2767,14 @@ int nf_conntrack_init_net(struct net *net)
 {
 	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
 	int ret = -ENOMEM;
-	int cpu;
 
 	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
 	BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
 	atomic_set(&cnet->count, 0);
 
-	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
-	if (!net->ct.pcpu_lists)
-		goto err_stat;
-
-	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-		spin_lock_init(&pcpu->lock);
-		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
-	}
-
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat)
-		goto err_pcpu_lists;
+		return ret;
 
 	ret = nf_conntrack_expect_pernet_init(net);
 	if (ret < 0)
@@ -2840,8 +2790,5 @@ int nf_conntrack_init_net(struct net *net)
 err_expect:
 	free_percpu(net->ct.stat);
-err_pcpu_lists:
-	free_percpu(net->ct.pcpu_lists);
-err_stat:
 	return ret;
 }
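
With pcpu_lists gone, nf_conntrack_init_net() has one allocation fewer to unwind: the stat allocation becomes the first failure point and can return directly instead of jumping to a cleanup label, and the err_pcpu_lists and err_stat labels disappear with it. In sketch form (struct and function names are illustrative), the kernel's goto-unwind idiom before and after:

	#include <linux/percpu.h>

	struct demo_net {
		struct ct_pcpu __percpu			*pcpu_lists;
		struct ip_conntrack_stat __percpu	*stat;
	};

	/* Before: two allocations, unwound in reverse order on failure. */
	static int demo_init_before(struct demo_net *n)
	{
		n->pcpu_lists = alloc_percpu(struct ct_pcpu);
		if (!n->pcpu_lists)
			goto err_stat;		/* nothing allocated yet */

		n->stat = alloc_percpu(struct ip_conntrack_stat);
		if (!n->stat)
			goto err_pcpu_lists;	/* free the earlier allocation */
		return 0;

	err_pcpu_lists:
		free_percpu(n->pcpu_lists);
	err_stat:
		return -ENOMEM;
	}

	/* After: the sole first allocation can fail straight out. */
	static int demo_init_after(struct demo_net *n)
	{
		n->stat = alloc_percpu(struct ip_conntrack_stat);
		if (!n->stat)
			return -ENOMEM;
		return 0;
	}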
@@ -1752,49 +1752,7 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
 static int
 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
-	struct nf_conn *ct, *last;
-	struct nf_conntrack_tuple_hash *h;
-	struct hlist_nulls_node *n;
-	struct net *net = sock_net(skb->sk);
-	int res, cpu;
-
-	if (ctx->done)
-		return 0;
-
-	last = ctx->last;
-
-	for (cpu = ctx->cpu; cpu < nr_cpu_ids; cpu++) {
-		struct ct_pcpu *pcpu;
-
-		if (!cpu_possible(cpu))
-			continue;
-
-		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-		spin_lock_bh(&pcpu->lock);
-restart:
-		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
-			ct = nf_ct_tuplehash_to_ctrack(h);
-
-			res = ctnetlink_dump_one_entry(skb, cb, ct, false);
-			if (res < 0) {
-				ctx->cpu = cpu;
-				spin_unlock_bh(&pcpu->lock);
-				goto out;
-			}
-		}
-		if (ctx->last) {
-			ctx->last = NULL;
-			goto restart;
-		}
-		spin_unlock_bh(&pcpu->lock);
-	}
-	ctx->done = true;
-out:
-	if (last)
-		nf_ct_put(last);
-
-	return skb->len;
+	return 0;
 }
 
 static int
...
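
Finally, ctnetlink_dump_unconfirmed() is reduced to return 0. In the netlink dump protocol, a callback returning a positive value (the removed body returned skb->len) asks the core to invoke it again for the next batch, while returning 0 signals that the dump is complete; the stub therefore ends every unconfirmed dump immediately with an empty result. A minimal sketch of that contract (callback name illustrative):

	#include <linux/netlink.h>
	#include <linux/skbuff.h>

	static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		bool done = true;	/* no unconfirmed entries exist anymore */

		if (done)
			return 0;	/* 0 tells the netlink core the dump is complete */

		/* ... otherwise: append records to skb ... */
		return skb->len;	/* >0 asks the core to call back for more */
	}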