Commit deedb590 authored by Daniel Borkmann, committed by Pablo Neira Ayuso

netfilter: nf_conntrack: add direction support for zones

This work adds a direction parameter to netfilter zones, so identity
separation can be performed only in original/reply or both directions
(default). This basically opens up the possibility of doing NAT with
conflicting IP address/port tuples from multiple, isolated tenants
on a host (e.g. from a netns) without requiring each tenant to NAT
twice or to use its own dedicated IP address for SNAT: overlapping
tuples can be made unique with the zone identifier in the original
direction, and the NAT engine then allocates a unique tuple in the
commonly shared default zone for the reply direction. In some
restricted, local DNAT cases, port redirection could also be used to
make the reply traffic unique without requiring SNAT.

The consensus reached and discussed at NFWS and since the initial
implementation [1] was to integrate the direction metadata directly
into the existing zones infrastructure, as opposed to the ct->mark
approach we proposed initially.

As we pass the nf_conntrack_zone object around directly, we don't have
to touch all call-sites, only those that contain zone equality checks.
Based on the current direction (original or reply), we either return
the actual id or the default NF_CT_DEFAULT_ZONE_ID. CT expectations
are direction-agnostic entities when compared among themselves, so in
that case only the zone identifier is used.
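
To illustrate the resolution semantics with the helpers this patch
introduces (hypothetical values, not taken from the patch itself):

  /* A zone confined to the original direction, e.g. as configured
   * via the CT target with --zone-dir ORIGINAL below.
   */
  struct nf_conntrack_zone zone = {
          .id  = 1,
          .dir = NF_CT_ZONE_DIR_ORIG,
  };

  /* Original-direction lookups resolve to the tenant's zone id ... */
  WARN_ON(nf_ct_zone_id(&zone, IP_CT_DIR_ORIGINAL) != 1);
  /* ... while replies fall back to the shared default zone. */
  WARN_ON(nf_ct_zone_id(&zone, IP_CT_DIR_REPLY) != NF_CT_DEFAULT_ZONE_ID);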

Note that zone identifiers cannot be included in the hash mix anymore,
as they no longer hold a "stable" value that is equal for both
directions at all times: if zone->id were unconditionally xor'ed into
the table slot hash, replies would no longer find the corresponding
conntrack entry.
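
As a self-contained userspace sketch of that failure mode (stand-in
hash function and made-up values, not kernel code):

  #include <assert.h>
  #include <stdint.h>

  #define DEFAULT_ZONE_ID 0

  /* Stand-in for hash_conntrack_raw()'s seeding; any mixing of the
   * zone id into the bucket choice exhibits the same problem.
   */
  static uint32_t bucket(uint32_t tuple_hash, uint16_t zone_id)
  {
          return (tuple_hash ^ zone_id) & (16384 - 1);
  }

  int main(void)
  {
          uint32_t h = 0xdeadbeef; /* hash over the reply tuple */

          /* The reply tuplehash was inserted under zone 1 (bound to
           * the original direction only), but the reply packet's
           * lookup resolves to the default zone id:
           */
          assert(bucket(h, 1) != bucket(h, DEFAULT_ZONE_ID));
          return 0;
  }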

If no particular direction is specified when configuring zones, the
behaviour is exactly as we expect currently (both directions).

Support has been added for the CT netlink interface as well as the
x_tables raw CT target, which both already offer existing interfaces
to user space for the configuration of zones.

Below is a minimal, simplified collision example (script in [2]) with
netperf sessions:

  +--- tenant-1 ---+   mark := 1
  |    netperf     |--+
  +----------------+  |                CT zone := mark [ORIGINAL]
   [ip,sport] := X   +--------------+  +--- gateway ---+
                     | mark routing |--|     SNAT      |-- ... +
                     +--------------+  +---------------+       |
  +--- tenant-2 ---+  |                                     ~~~|~~~
  |    netperf     |--+                +-----------+           |
  +----------------+   mark := 2       | netserver |------ ... +
   [ip,sport] := X                     +-----------+
                                        [ip,port] := Y

In the gateway netns, for example:

  iptables -t raw -A PREROUTING -j CT --zone mark --zone-dir ORIGINAL
  iptables -t nat -A POSTROUTING -o <dev> -j SNAT --to-source <ip> --random-fully

  iptables -t mangle -A PREROUTING -m conntrack --ctdir ORIGINAL -j CONNMARK --save-mark
  iptables -t mangle -A POSTROUTING -m conntrack --ctdir REPLY -j CONNMARK --restore-mark
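
The per-tenant mark itself can be derived in the gateway netns from
the ingress device, inserted ahead of the CT rule so that --zone mark
sees it (one plausible arrangement with hypothetical device names; the
actual setup script is in [2]):

  iptables -t raw -I PREROUTING -i <veth-tenant1> -j MARK --set-mark 1
  iptables -t raw -I PREROUTING -i <veth-tenant2> -j MARK --set-mark 2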

conntrack dump from the gateway netns:

  netperf -H 10.1.1.2 -t TCP_STREAM -l60 -p12865,5555 run from each tenant netns

  tcp 6 431995 ESTABLISHED src=40.1.1.1 dst=10.1.1.2 sport=5555 dport=12865 zone-orig=1
                           src=10.1.1.2 dst=10.1.1.1 sport=12865 dport=1024
               [ASSURED] mark=1 secctx=system_u:object_r:unlabeled_t:s0 use=1

  tcp 6 431994 ESTABLISHED src=40.1.1.1 dst=10.1.1.2 sport=5555 dport=12865 zone-orig=2
                           src=10.1.1.2 dst=10.1.1.1 sport=12865 dport=5555
               [ASSURED] mark=2 secctx=system_u:object_r:unlabeled_t:s0 use=1

  tcp 6 299 ESTABLISHED src=40.1.1.1 dst=10.1.1.2 sport=39438 dport=33768 zone-orig=1
                        src=10.1.1.2 dst=10.1.1.1 sport=33768 dport=39438
               [ASSURED] mark=1 secctx=system_u:object_r:unlabeled_t:s0 use=1

  tcp 6 300 ESTABLISHED src=40.1.1.1 dst=10.1.1.2 sport=32889 dport=40206 zone-orig=2
                        src=10.1.1.2 dst=10.1.1.1 sport=40206 dport=32889
               [ASSURED] mark=2 secctx=system_u:object_r:unlabeled_t:s0 use=2

Taking this further, the test script in [2] creates 200 tenants and
runs original-tuple-colliding netperf sessions in each. A conntrack -L
dump in the gateway netns also confirms 200 overlapping entries, all
in ESTABLISHED state as expected.

I also ran various other tests with permutations of the script, among
them: SNAT in random/random-fully/persistent mode, no zones (no
overlaps), and static zones (original, reply, both directions).

  [1] http://thread.gmane.org/gmane.comp.security.firewalls.netfilter.devel/57412/
  [2] https://paste.fedoraproject.org/242835/65657871/

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 308ac914

include/net/netfilter/nf_conntrack_zones.h

 #ifndef _NF_CONNTRACK_ZONES_H
 #define _NF_CONNTRACK_ZONES_H
 
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
 #define NF_CT_DEFAULT_ZONE_ID  0
 
+#define NF_CT_ZONE_DIR_ORIG    (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL    (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
 struct nf_conntrack_zone {
         u16     id;
+        u16     dir;
 };
 
 extern const struct nf_conntrack_zone nf_ct_zone_dflt;
@@ -29,8 +37,29 @@ nf_ct_zone_tmpl(const struct nf_conn *tmpl)
         return tmpl ? nf_ct_zone(tmpl) : &nf_ct_zone_dflt;
 }
 
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+                                          enum ip_conntrack_dir dir)
+{
+        return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+                                enum ip_conntrack_dir dir)
+{
+        return nf_ct_zone_matches_dir(zone, dir) ?
+               zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
 static inline bool nf_ct_zone_equal(const struct nf_conn *a,
-                                    const struct nf_conntrack_zone *b)
+                                    const struct nf_conntrack_zone *b,
+                                    enum ip_conntrack_dir dir)
+{
+        return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+               nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+                                        const struct nf_conntrack_zone *b)
 {
         return nf_ct_zone(a)->id == b->id;
 }
include/uapi/linux/netfilter/nfnetlink_conntrack.h

@@ -61,6 +61,7 @@ enum ctattr_tuple {
         CTA_TUPLE_UNSPEC,
         CTA_TUPLE_IP,
         CTA_TUPLE_PROTO,
+        CTA_TUPLE_ZONE,
         __CTA_TUPLE_MAX
 };
 #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
include/uapi/linux/netfilter/xt_CT.h

@@ -6,7 +6,11 @@
 enum {
         XT_CT_NOTRACK           = 1 << 0,
         XT_CT_NOTRACK_ALIAS     = 1 << 1,
-        XT_CT_MASK              = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+        XT_CT_ZONE_DIR_ORIG     = 1 << 2,
+        XT_CT_ZONE_DIR_REPL     = 1 << 3,
+
+        XT_CT_MASK              = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+                                  XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL,
 };
 
 struct xt_ct_target_info {
net/ipv4/netfilter/nf_defrag_ipv4.c

@@ -45,8 +45,12 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 {
         u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-        if (skb->nfct)
-                zone_id = nf_ct_zone((struct nf_conn *)skb->nfct)->id;
+        if (skb->nfct) {
+                enum ip_conntrack_info ctinfo;
+                const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+                zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+        }
 #endif
         if (nf_bridge_in_prerouting(skb))
                 return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
net/ipv6/netfilter/nf_defrag_ipv6.c

@@ -35,8 +35,12 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 {
         u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-        if (skb->nfct)
-                zone_id = nf_ct_zone((struct nf_conn *)skb->nfct)->id;
+        if (skb->nfct) {
+                enum ip_conntrack_info ctinfo;
+                const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+                zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+        }
 #endif
         if (nf_bridge_in_prerouting(skb))
                 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
net/netfilter/nf_conntrack_core.c

@@ -126,8 +126,7 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
-                              const struct nf_conntrack_zone *zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
         unsigned int n;
 
@@ -136,7 +135,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
          * three bytes manually.
          */
         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-        return jhash2((u32 *)tuple, n, zone->id ^ nf_conntrack_hash_rnd ^
+        return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                       (((__force __u16)tuple->dst.u.all << 16) |
                       tuple->dst.protonum));
 }
@@ -152,17 +151,15 @@ static u32 hash_bucket(u32 hash, const struct net *net)
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                  const struct nf_conntrack_zone *zone,
                                   unsigned int size)
 {
-        return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+        return __hash_bucket(hash_conntrack_raw(tuple), size);
 }
 
 static inline u_int32_t hash_conntrack(const struct net *net,
-                                       const struct nf_conntrack_zone *zone,
                                        const struct nf_conntrack_tuple *tuple)
 {
-        return __hash_conntrack(tuple, zone, net->ct.htable_size);
+        return __hash_conntrack(tuple, net->ct.htable_size);
 }
 
 bool
@@ -312,6 +309,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                 if (!nf_ct_zone)
                         goto out_free;
                 nf_ct_zone->id = zone->id;
+                nf_ct_zone->dir = zone->dir;
         }
 #endif
         atomic_set(&tmpl->ct_general.use, 0);
@@ -376,20 +374,18 @@ destroy_conntrack(struct nf_conntrack *nfct)
 static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
-        const struct nf_conntrack_zone *zone;
         struct net *net = nf_ct_net(ct);
         unsigned int hash, reply_hash;
         unsigned int sequence;
 
-        zone = nf_ct_zone(ct);
         nf_ct_helper_destroy(ct);
 
         local_bh_disable();
         do {
                 sequence = read_seqcount_begin(&net->ct.generation);
-                hash = hash_conntrack(net, zone,
+                hash = hash_conntrack(net,
                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-                reply_hash = hash_conntrack(net, zone,
+                reply_hash = hash_conntrack(net,
                                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -446,7 +442,7 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
          * so we need to check that the conntrack is confirmed
          */
         return nf_ct_tuple_equal(tuple, &h->tuple) &&
-               nf_ct_zone_equal(ct, zone) &&
+               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
                nf_ct_is_confirmed(ct);
 }
@@ -523,7 +519,7 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
 {
         return __nf_conntrack_find_get(net, zone, tuple,
-                                       hash_conntrack_raw(tuple, zone));
+                                       hash_conntrack_raw(tuple));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
@@ -554,9 +550,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
         local_bh_disable();
         do {
                 sequence = read_seqcount_begin(&net->ct.generation);
-                hash = hash_conntrack(net, zone,
+                hash = hash_conntrack(net,
                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-                reply_hash = hash_conntrack(net, zone,
+                reply_hash = hash_conntrack(net,
                                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -564,12 +560,14 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                       &h->tuple) &&
-                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
+                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                     NF_CT_DIRECTION(h)))
                         goto out;
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                       &h->tuple) &&
-                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
+                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                     NF_CT_DIRECTION(h)))
                         goto out;
 
         add_timer(&ct->timeout);
@@ -623,7 +621,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                 /* reuse the hash saved before */
                 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                 hash = hash_bucket(hash, net);
-                reply_hash = hash_conntrack(net, zone,
+                reply_hash = hash_conntrack(net,
                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -655,12 +653,14 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                       &h->tuple) &&
-                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
+                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                     NF_CT_DIRECTION(h)))
                         goto out;
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                       &h->tuple) &&
-                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
+                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                     NF_CT_DIRECTION(h)))
                         goto out;
 
         /* Timer relative to confirmation time, not original
@@ -720,7 +720,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
         unsigned int hash;
 
         zone = nf_ct_zone(ignored_conntrack);
-        hash = hash_conntrack(net, zone, tuple);
+        hash = hash_conntrack(net, tuple);
 
         /* Disable BHs the entire time since we need to disable them at
          * least once for the stats anyway.
@@ -730,7 +730,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                 ct = nf_ct_tuplehash_to_ctrack(h);
                 if (ct != ignored_conntrack &&
                     nf_ct_tuple_equal(tuple, &h->tuple) &&
-                    nf_ct_zone_equal(ct, zone)) {
+                    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                         NF_CT_STAT_INC(net, found);
                         rcu_read_unlock_bh();
                         return 1;
@@ -830,7 +830,7 @@ __nf_conntrack_alloc(struct net *net,
         if (unlikely(!nf_conntrack_hash_rnd)) {
                 init_nf_conntrack_hash_rnd();
                 /* recompute the hash as nf_conntrack_hash_rnd is initialized */
-                hash = hash_conntrack_raw(orig, zone);
+                hash = hash_conntrack_raw(orig);
         }
 
         /* We don't want any race condition at early drop stage */
@@ -875,6 +875,7 @@ __nf_conntrack_alloc(struct net *net,
                 if (!nf_ct_zone)
                         goto out_free;
                 nf_ct_zone->id = zone->id;
+                nf_ct_zone->dir = zone->dir;
         }
 #endif
         /* Because we use RCU lookups, we set ct_general.use to zero before
@@ -1053,7 +1054,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
         /* look for tuple match */
         zone = nf_ct_zone_tmpl(tmpl);
-        hash = hash_conntrack_raw(&tuple, zone);
+        hash = hash_conntrack_raw(&tuple);
         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
         if (!h) {
                 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1306,6 +1307,7 @@ EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
 /* Built-in default zone used e.g. by modules. */
 const struct nf_conntrack_zone nf_ct_zone_dflt = {
         .id     = NF_CT_DEFAULT_ZONE_ID,
+        .dir    = NF_CT_DEFAULT_ZONE_DIR,
 };
 EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
@@ -1617,8 +1619,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                                  struct nf_conntrack_tuple_hash, hnnode);
                         ct = nf_ct_tuplehash_to_ctrack(h);
                         hlist_nulls_del_rcu(&h->hnnode);
-                        bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-                                                  hashsize);
+                        bucket = __hash_conntrack(&h->tuple, hashsize);
                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                 }
         }
net/netfilter/nf_conntrack_expect.c

@@ -101,7 +101,7 @@ __nf_ct_expect_find(struct net *net,
         h = nf_ct_expect_dst_hash(tuple);
         hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
                 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                    nf_ct_zone_equal(i->master, zone))
+                    nf_ct_zone_equal_any(i->master, zone))
                         return i;
         }
         return NULL;
@@ -143,7 +143,7 @@ nf_ct_find_expectation(struct net *net,
         hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
                 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                     nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                    nf_ct_zone_equal(i->master, zone)) {
+                    nf_ct_zone_equal_any(i->master, zone)) {
                         exp = i;
                         break;
                 }
@@ -223,7 +223,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
         }
 
         return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
-               nf_ct_zone_equal(a->master, nf_ct_zone(b->master));
+               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
@@ -232,7 +232,7 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
         return a->master == b->master && a->class == b->class &&
                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
-               nf_ct_zone_equal(a->master, nf_ct_zone(b->master));
+               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 /* Generally a bad idea to call this: could have matched already. */
net/netfilter/nf_conntrack_netlink.c (diff collapsed in the original view)
net/netfilter/nf_conntrack_standalone.c

@@ -141,12 +141,30 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
 #endif
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct)
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+                         int dir)
 {
-        seq_printf(s, "zone=%u ", nf_ct_zone(ct)->id);
+        const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+        if (zone->dir != dir)
+                return;
+
+        switch (zone->dir) {
+        case NF_CT_DEFAULT_ZONE_DIR:
+                seq_printf(s, "zone=%u ", zone->id);
+                break;
+        case NF_CT_ZONE_DIR_ORIG:
+                seq_printf(s, "zone-orig=%u ", zone->id);
+                break;
+        case NF_CT_ZONE_DIR_REPL:
+                seq_printf(s, "zone-reply=%u ", zone->id);
+                break;
+        default:
+                break;
+        }
 }
 #else
-static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct)
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+                                int dir)
 {
 }
 #endif
@@ -213,6 +231,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
         print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                     l3proto, l4proto);
 
+        ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
         if (seq_has_overflowed(s))
                 goto release;
@@ -225,6 +245,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
         print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                     l3proto, l4proto);
 
+        ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
         if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                 goto release;
@@ -239,7 +261,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 #endif
 
         ct_show_secctx(s, ct);
-        ct_show_zone(s, ct);
+        ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
         ct_show_delta_time(s, ct);
 
         seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
net/netfilter/nf_nat_core.c

@@ -118,15 +118,13 @@ EXPORT_SYMBOL(nf_xfrm_me_harder);
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net,
-            const struct nf_conntrack_zone *zone,
-            const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
         unsigned int hash;
 
         /* Original src, to ensure we map it consistently if poss. */
         hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-                      tuple->dst.protonum ^ zone->id ^ nf_conntrack_hash_rnd);
+                      tuple->dst.protonum ^ nf_conntrack_hash_rnd);
 
         return reciprocal_scale(hash, net->ct.nat_htable_size);
 }
@@ -194,13 +192,14 @@ find_appropriate_src(struct net *net,
                      struct nf_conntrack_tuple *result,
                      const struct nf_nat_range *range)
 {
-        unsigned int h = hash_by_src(net, zone, tuple);
+        unsigned int h = hash_by_src(net, tuple);
         const struct nf_conn_nat *nat;
         const struct nf_conn *ct;
 
         hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
                 ct = nat->ct;
-                if (same_src(ct, tuple) && nf_ct_zone_equal(ct, zone)) {
+                if (same_src(ct, tuple) &&
+                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                         /* Copy source part from reply tuple. */
                         nf_ct_invert_tuplepr(result,
                                              &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -425,7 +424,7 @@ nf_nat_setup_info(struct nf_conn *ct,
         if (maniptype == NF_NAT_MANIP_SRC) {
                 unsigned int srchash;
 
-                srchash = hash_by_src(net, nf_ct_zone(ct),
+                srchash = hash_by_src(net,
                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                 spin_lock_bh(&nf_nat_lock);
                 /* nf_conntrack_alter_reply might re-allocate extension aera */
net/netfilter/xt_CT.c

@@ -181,6 +181,19 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
 #endif
 }
 
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+        switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+                               XT_CT_ZONE_DIR_REPL)) {
+        case XT_CT_ZONE_DIR_ORIG:
+                return NF_CT_ZONE_DIR_ORIG;
+        case XT_CT_ZONE_DIR_REPL:
+                return NF_CT_ZONE_DIR_REPL;
+        default:
+                return NF_CT_DEFAULT_ZONE_DIR;
+        }
+}
+
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                           struct xt_ct_target_info_v1 *info)
 {
@@ -194,7 +207,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
         }
 
 #ifndef CONFIG_NF_CONNTRACK_ZONES
-        if (info->zone)
+        if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+                                         XT_CT_ZONE_DIR_REPL))
                 goto err1;
 #endif
@@ -204,6 +218,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
         memset(&zone, 0, sizeof(zone));
         zone.id = info->zone;
+        zone.dir = xt_ct_flags_to_dir(info);
 
         ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
         ret = PTR_ERR(ct);
net/sched/act_connmark.c

@@ -72,6 +72,7 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
                 goto out;
 
         zone.id = ca->zone;
+        zone.dir = NF_CT_DEFAULT_ZONE_DIR;
 
         thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
         if (!thash)