Commit 2cf12348 authored by Florian Westphal's avatar Florian Westphal Committed by Pablo Neira Ayuso

netfilter: conntrack: keep BH enabled during lookup

No need to disable BH here anymore:

stats are switched to _ATOMIC variant (== this_cpu_inc()), which
nowadays generates same code as the non _ATOMIC NF_STAT, at least on x86.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 1ad8f48d
...@@ -472,18 +472,13 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, ...@@ -472,18 +472,13 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
struct hlist_nulls_node *n; struct hlist_nulls_node *n;
unsigned int bucket = hash_bucket(hash, net); unsigned int bucket = hash_bucket(hash, net);
/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
*/
local_bh_disable();
begin: begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
if (nf_ct_key_equal(h, tuple, zone)) { if (nf_ct_key_equal(h, tuple, zone)) {
NF_CT_STAT_INC(net, found); NF_CT_STAT_INC_ATOMIC(net, found);
local_bh_enable();
return h; return h;
} }
NF_CT_STAT_INC(net, searched); NF_CT_STAT_INC_ATOMIC(net, searched);
} }
/* /*
* if the nulls value we got at the end of this lookup is * if the nulls value we got at the end of this lookup is
...@@ -491,10 +486,9 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, ...@@ -491,10 +486,9 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
* We probably met an item that was moved to another chain. * We probably met an item that was moved to another chain.
*/ */
if (get_nulls_value(n) != bucket) { if (get_nulls_value(n) != bucket) {
NF_CT_STAT_INC(net, search_restart); NF_CT_STAT_INC_ATOMIC(net, search_restart);
goto begin; goto begin;
} }
local_bh_enable();
return NULL; return NULL;
} }
...@@ -735,22 +729,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, ...@@ -735,22 +729,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
zone = nf_ct_zone(ignored_conntrack); zone = nf_ct_zone(ignored_conntrack);
hash = hash_conntrack(net, tuple); hash = hash_conntrack(net, tuple);
/* Disable BHs the entire time since we need to disable them at rcu_read_lock();
* least once for the stats anyway.
*/
rcu_read_lock_bh();
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h); ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack && if (ct != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_tuple_equal(tuple, &h->tuple) &&
nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) { nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
NF_CT_STAT_INC(net, found); NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock_bh(); rcu_read_unlock();
return 1; return 1;
} }
NF_CT_STAT_INC(net, searched); NF_CT_STAT_INC_ATOMIC(net, searched);
} }
rcu_read_unlock_bh(); rcu_read_unlock();
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment