Commit c75a312d authored by David S. Miller

Merge branch 'master' of git://1984.lsi.us.es/net-next

parents b4fb05ea ace30d73
......@@ -10,6 +10,7 @@ header-y += nfnetlink.h
header-y += nfnetlink_acct.h
header-y += nfnetlink_compat.h
header-y += nfnetlink_conntrack.h
header-y += nfnetlink_cttimeout.h
header-y += nfnetlink_log.h
header-y += nfnetlink_queue.h
header-y += x_tables.h
......@@ -22,6 +23,7 @@ header-y += xt_CT.h
header-y += xt_DSCP.h
header-y += xt_IDLETIMER.h
header-y += xt_LED.h
header-y += xt_LOG.h
header-y += xt_MARK.h
header-y += xt_nfacct.h
header-y += xt_NFLOG.h
......
......@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
#include <linux/types.h>
/* The protocol version */
#define IPSET_PROTOCOL 6
......@@ -148,6 +150,7 @@ enum ipset_cmd_flags {
IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME),
IPSET_FLAG_BIT_LIST_HEADER = 2,
IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER),
IPSET_FLAG_CMD_MAX = 15, /* Lower half */
};
/* Flags at CADT attribute level */
......@@ -156,6 +159,9 @@ enum ipset_cadt_flags {
IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE),
IPSET_FLAG_BIT_PHYSDEV = 1,
IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV),
IPSET_FLAG_BIT_NOMATCH = 2,
IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH),
IPSET_FLAG_CADT_MAX = 15, /* Upper half */
};
/* Commands with settype-specific attributes */
......@@ -168,19 +174,10 @@ enum ipset_adt {
IPSET_CADT_MAX,
};
#ifdef __KERNEL__
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
* and IPSET_INVALID_ID if you want to increase the max number of sets.
*/
typedef u16 ip_set_id_t;
typedef __u16 ip_set_id_t;
#define IPSET_INVALID_ID 65535
......@@ -203,6 +200,15 @@ enum ip_set_kopt {
IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
};
#ifdef __KERNEL__
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
/* Set features */
enum ip_set_feature {
IPSET_TYPE_IP_FLAG = 0,
......@@ -288,7 +294,10 @@ struct ip_set_type {
u8 features;
/* Set type dimension */
u8 dimension;
/* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
/*
* Supported family: may be NFPROTO_UNSPEC for both
* NFPROTO_IPV4/NFPROTO_IPV6.
*/
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
......@@ -450,6 +459,8 @@ bitmap_bytes(u32 a, u32 b)
return 4 * ((((b - a + 8) / 8) + 3) / 4);
}
#endif /* __KERNEL__ */
/* Interface to iptables/ip6tables */
#define SO_IP_SET 83
......@@ -475,6 +486,4 @@ struct ip_set_req_version {
unsigned version;
};
#endif /* __KERNEL__ */
#endif /*_IP_SET_H */
......@@ -113,6 +113,12 @@ htable_bits(u32 hashsize)
}
#ifdef IP_SET_HASH_WITH_NETS
#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
#define CIDR(cidr) (cidr + 1)
#else
#define CIDR(cidr) (cidr)
#endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
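A note on the packed cidr convention above, with an illustrative sketch (not part of this patch): when IP_SET_HASH_WITH_NETS_PACKED is defined, an element stores cidr - 1 in a 7-bit field so that the spare bit can carry the "nomatch" flag, and CIDR() adds the 1 back whenever the real prefix length is needed.
/* Illustrative only: the packed layout used by the hash:*,net types below.
 * A /24 entry stores 23 in the 7-bit field; CIDR(e->cidr) recovers 24. */
struct example_packed_elem {
	u8 cidr:7;	/* prefix length minus one */
	u8 nomatch:1;	/* set when the entry was added with "nomatch" */
};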
......@@ -262,6 +268,12 @@ ip_set_hash_destroy(struct ip_set *set)
#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
#define type_pf_data_next TOKEN(TYPE, PF, _data_next)
#define type_pf_data_flags TOKEN(TYPE, PF, _data_flags)
#ifdef IP_SET_HASH_WITH_NETS
#define type_pf_data_match TOKEN(TYPE, PF, _data_match)
#else
#define type_pf_data_match(d) 1
#endif
#define type_pf_elem TOKEN(TYPE, PF, _elem)
#define type_pf_telem TOKEN(TYPE, PF, _telem)
......@@ -308,8 +320,10 @@ ip_set_hash_destroy(struct ip_set *set)
* we spare the maintenance of the internal counters. */
static int
type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value,
u8 ahash_max)
u8 ahash_max, u32 cadt_flags)
{
struct type_pf_elem *data;
if (n->pos >= n->size) {
void *tmp;
......@@ -330,7 +344,13 @@ type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value,
n->value = tmp;
n->size += AHASH_INIT_SIZE;
}
type_pf_data_copy(ahash_data(n, n->pos++), value);
data = ahash_data(n, n->pos++);
type_pf_data_copy(data, value);
#ifdef IP_SET_HASH_WITH_NETS
/* Resizing won't overwrite stored flags */
if (cadt_flags)
type_pf_data_flags(data, cadt_flags);
#endif
return 0;
}
......@@ -353,9 +373,12 @@ type_pf_resize(struct ip_set *set, bool retried)
htable_bits++;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
if (!htable_bits)
if (!htable_bits) {
/* In case we have plenty of memory :-) */
pr_warning("Cannot increase the hashsize of set %s further\n",
set->name);
return -IPSET_ERR_HASH_FULL;
}
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
......@@ -368,7 +391,7 @@ type_pf_resize(struct ip_set *set, bool retried)
for (j = 0; j < n->pos; j++) {
data = ahash_data(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
ret = type_pf_elem_add(m, data, AHASH_MAX(h));
ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
if (ret < 0) {
read_unlock_bh(&set->lock);
ahash_destroy(t);
......@@ -406,9 +429,14 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
struct hbucket *n;
int i, ret = 0;
u32 key, multi = 0;
u32 cadt_flags = flags >> 16;
if (h->elements >= h->maxelem)
if (h->elements >= h->maxelem) {
if (net_ratelimit())
pr_warning("Set %s is full, maxelem %u reached\n",
set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
}
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
......@@ -416,11 +444,17 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
n = hbucket(t, key);
for (i = 0; i < n->pos; i++)
if (type_pf_data_equal(ahash_data(n, i), d, &multi)) {
#ifdef IP_SET_HASH_WITH_NETS
if (flags & IPSET_FLAG_EXIST)
/* Support overwriting just the flags */
type_pf_data_flags(ahash_data(n, i),
cadt_flags);
#endif
ret = -IPSET_ERR_EXIST;
goto out;
}
TUNE_AHASH_MAX(h, multi);
ret = type_pf_elem_add(n, value, AHASH_MAX(h));
ret = type_pf_elem_add(n, value, AHASH_MAX(h), cadt_flags);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
......@@ -428,7 +462,7 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
}
#ifdef IP_SET_HASH_WITH_NETS
add_cidr(h, d->cidr, HOST_MASK);
add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
h->elements++;
out:
......@@ -463,7 +497,7 @@ type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, d->cidr, HOST_MASK);
del_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
......@@ -506,7 +540,7 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i);
if (type_pf_data_equal(data, d, &multi))
return 1;
return type_pf_data_match(data);
}
}
return 0;
......@@ -528,7 +562,7 @@ type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
* try all possible network sizes */
if (d->cidr == SET_HOST_MASK(set->family))
if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_test_cidrs(set, d, timeout);
#endif
......@@ -537,7 +571,7 @@ type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i);
if (type_pf_data_equal(data, d, &multi))
return 1;
return type_pf_data_match(data);
}
return 0;
}
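For reference, a minimal sketch (an assumed helper, not part of this patch) of the flag-word convention relied on above: command flags such as IPSET_FLAG_EXIST occupy the lower 16 bits (IPSET_FLAG_CMD_MAX, "lower half"), while CADT flags such as IPSET_FLAG_NOMATCH are shifted into the upper 16 bits and recovered with "flags >> 16" at the top of type_pf_add()/type_pf_tadd().
/* Illustrative only: how a combined flags word would be built. */
static inline u32 example_pack_adt_flags(u32 cmd_flags, u32 cadt_flags)
{
	return (cmd_flags & 0xffff) | (cadt_flags << 16);
}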
......@@ -693,7 +727,7 @@ type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
static int
type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
u8 ahash_max, u32 timeout)
u8 ahash_max, u32 cadt_flags, u32 timeout)
{
struct type_pf_elem *data;
......@@ -720,6 +754,11 @@ type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
data = ahash_tdata(n, n->pos++);
type_pf_data_copy(data, value);
type_pf_data_timeout_set(data, timeout);
#ifdef IP_SET_HASH_WITH_NETS
/* Resizing won't overwrite stored flags */
if (cadt_flags)
type_pf_data_flags(data, cadt_flags);
#endif
return 0;
}
......@@ -740,7 +779,7 @@ type_pf_expire(struct ip_set_hash *h)
if (type_pf_data_expired(data)) {
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, data->cidr, HOST_MASK);
del_cidr(h, CIDR(data->cidr), HOST_MASK);
#endif
if (j != n->pos - 1)
/* Not last one */
......@@ -790,9 +829,12 @@ type_pf_tresize(struct ip_set *set, bool retried)
retry:
ret = 0;
htable_bits++;
if (!htable_bits)
if (!htable_bits) {
/* In case we have plenty of memory :-) */
pr_warning("Cannot increase the hashsize of set %s further\n",
set->name);
return -IPSET_ERR_HASH_FULL;
}
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
......@@ -805,7 +847,7 @@ type_pf_tresize(struct ip_set *set, bool retried)
for (j = 0; j < n->pos; j++) {
data = ahash_tdata(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
ret = type_pf_elem_tadd(m, data, AHASH_MAX(h),
ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
type_pf_data_timeout(data));
if (ret < 0) {
read_unlock_bh(&set->lock);
......@@ -839,12 +881,17 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
int ret = 0, i, j = AHASH_MAX(h) + 1;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
u32 cadt_flags = flags >> 16;
if (h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
type_pf_expire(h);
if (h->elements >= h->maxelem)
if (h->elements >= h->maxelem) {
if (net_ratelimit())
pr_warning("Set %s is full, maxelem %u reached\n",
set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
}
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
......@@ -854,6 +901,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
data = ahash_tdata(n, i);
if (type_pf_data_equal(data, d, &multi)) {
if (type_pf_data_expired(data) || flag_exist)
/* Just the timeout value may be updated */
j = i;
else {
ret = -IPSET_ERR_EXIST;
......@@ -866,15 +914,18 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
if (j != AHASH_MAX(h) + 1) {
data = ahash_tdata(n, j);
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, data->cidr, HOST_MASK);
add_cidr(h, d->cidr, HOST_MASK);
del_cidr(h, CIDR(data->cidr), HOST_MASK);
add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
type_pf_data_copy(data, d);
type_pf_data_timeout_set(data, timeout);
#ifdef IP_SET_HASH_WITH_NETS
type_pf_data_flags(data, cadt_flags);
#endif
goto out;
}
TUNE_AHASH_MAX(h, multi);
ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), timeout);
ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), cadt_flags, timeout);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
......@@ -882,7 +933,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
}
#ifdef IP_SET_HASH_WITH_NETS
add_cidr(h, d->cidr, HOST_MASK);
add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
h->elements++;
out:
......@@ -916,7 +967,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, d->cidr, HOST_MASK);
del_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
......@@ -954,8 +1005,17 @@ type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
if (type_pf_data_equal(data, d, &multi))
return !type_pf_data_expired(data);
#ifdef IP_SET_HASH_WITH_MULTI
if (type_pf_data_equal(data, d, &multi)) {
if (!type_pf_data_expired(data))
return type_pf_data_match(data);
multi = 0;
}
#else
if (type_pf_data_equal(data, d, &multi) &&
!type_pf_data_expired(data))
return type_pf_data_match(data);
#endif
}
}
return 0;
......@@ -973,15 +1033,16 @@ type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
u32 key, multi = 0;
#ifdef IP_SET_HASH_WITH_NETS
if (d->cidr == SET_HOST_MASK(set->family))
if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_ttest_cidrs(set, d, timeout);
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
if (type_pf_data_equal(data, d, &multi))
return !type_pf_data_expired(data);
if (type_pf_data_equal(data, d, &multi) &&
!type_pf_data_expired(data))
return type_pf_data_match(data);
}
return 0;
}
......@@ -1094,14 +1155,17 @@ type_pf_gc_init(struct ip_set *set)
#undef type_pf_data_isnull
#undef type_pf_data_copy
#undef type_pf_data_zero_out
#undef type_pf_data_netmask
#undef type_pf_data_list
#undef type_pf_data_tlist
#undef type_pf_data_next
#undef type_pf_data_flags
#undef type_pf_data_match
#undef type_pf_elem
#undef type_pf_telem
#undef type_pf_data_timeout
#undef type_pf_data_expired
#undef type_pf_data_netmask
#undef type_pf_data_timeout_set
#undef type_pf_elem_add
......@@ -1111,6 +1175,7 @@ type_pf_gc_init(struct ip_set *set)
#undef type_pf_test
#undef type_pf_elem_tadd
#undef type_pf_del_telem
#undef type_pf_expire
#undef type_pf_tadd
#undef type_pf_tdel
......
......@@ -18,7 +18,10 @@ enum tcp_conntrack {
TCP_CONNTRACK_LISTEN, /* obsolete */
#define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN
TCP_CONNTRACK_MAX,
TCP_CONNTRACK_IGNORE
TCP_CONNTRACK_IGNORE,
TCP_CONNTRACK_RETRANS,
TCP_CONNTRACK_UNACK,
TCP_CONNTRACK_TIMEOUT_MAX
};
/* Window scaling is advertised by the sender */
......
......@@ -49,7 +49,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_OSF 5
#define NFNL_SUBSYS_IPSET 6
#define NFNL_SUBSYS_ACCT 7
#define NFNL_SUBSYS_COUNT 8
#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8
#define NFNL_SUBSYS_COUNT 9
#ifdef __KERNEL__
......
......@@ -173,10 +173,21 @@ enum ctattr_expect {
CTA_EXPECT_HELP_NAME,
CTA_EXPECT_ZONE,
CTA_EXPECT_FLAGS,
CTA_EXPECT_CLASS,
CTA_EXPECT_NAT,
CTA_EXPECT_FN,
__CTA_EXPECT_MAX
};
#define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1)
enum ctattr_expect_nat {
CTA_EXPECT_NAT_UNSPEC,
CTA_EXPECT_NAT_DIR,
CTA_EXPECT_NAT_TUPLE,
__CTA_EXPECT_NAT_MAX
};
#define CTA_EXPECT_NAT_MAX (__CTA_EXPECT_NAT_MAX - 1)
enum ctattr_help {
CTA_HELP_UNSPEC,
CTA_HELP_NAME,
......
#ifndef _CTTIMEOUT_NETLINK_H
#define _CTTIMEOUT_NETLINK_H
#include <linux/netfilter/nfnetlink.h>
enum ctnl_timeout_msg_types {
IPCTNL_MSG_TIMEOUT_NEW,
IPCTNL_MSG_TIMEOUT_GET,
IPCTNL_MSG_TIMEOUT_DELETE,
IPCTNL_MSG_TIMEOUT_MAX
};
enum ctattr_timeout {
CTA_TIMEOUT_UNSPEC,
CTA_TIMEOUT_NAME,
CTA_TIMEOUT_L3PROTO,
CTA_TIMEOUT_L4PROTO,
CTA_TIMEOUT_DATA,
CTA_TIMEOUT_USE,
__CTA_TIMEOUT_MAX
};
#define CTA_TIMEOUT_MAX (__CTA_TIMEOUT_MAX - 1)
enum ctattr_timeout_generic {
CTA_TIMEOUT_GENERIC_UNSPEC,
CTA_TIMEOUT_GENERIC_TIMEOUT,
__CTA_TIMEOUT_GENERIC_MAX
};
#define CTA_TIMEOUT_GENERIC_MAX (__CTA_TIMEOUT_GENERIC_MAX - 1)
enum ctattr_timeout_tcp {
CTA_TIMEOUT_TCP_UNSPEC,
CTA_TIMEOUT_TCP_SYN_SENT,
CTA_TIMEOUT_TCP_SYN_RECV,
CTA_TIMEOUT_TCP_ESTABLISHED,
CTA_TIMEOUT_TCP_FIN_WAIT,
CTA_TIMEOUT_TCP_CLOSE_WAIT,
CTA_TIMEOUT_TCP_LAST_ACK,
CTA_TIMEOUT_TCP_TIME_WAIT,
CTA_TIMEOUT_TCP_CLOSE,
CTA_TIMEOUT_TCP_SYN_SENT2,
CTA_TIMEOUT_TCP_RETRANS,
CTA_TIMEOUT_TCP_UNACK,
__CTA_TIMEOUT_TCP_MAX
};
#define CTA_TIMEOUT_TCP_MAX (__CTA_TIMEOUT_TCP_MAX - 1)
enum ctattr_timeout_udp {
CTA_TIMEOUT_UDP_UNSPEC,
CTA_TIMEOUT_UDP_UNREPLIED,
CTA_TIMEOUT_UDP_REPLIED,
__CTA_TIMEOUT_UDP_MAX
};
#define CTA_TIMEOUT_UDP_MAX (__CTA_TIMEOUT_UDP_MAX - 1)
enum ctattr_timeout_udplite {
CTA_TIMEOUT_UDPLITE_UNSPEC,
CTA_TIMEOUT_UDPLITE_UNREPLIED,
CTA_TIMEOUT_UDPLITE_REPLIED,
__CTA_TIMEOUT_UDPLITE_MAX
};
#define CTA_TIMEOUT_UDPLITE_MAX (__CTA_TIMEOUT_UDPLITE_MAX - 1)
enum ctattr_timeout_icmp {
CTA_TIMEOUT_ICMP_UNSPEC,
CTA_TIMEOUT_ICMP_TIMEOUT,
__CTA_TIMEOUT_ICMP_MAX
};
#define CTA_TIMEOUT_ICMP_MAX (__CTA_TIMEOUT_ICMP_MAX - 1)
enum ctattr_timeout_dccp {
CTA_TIMEOUT_DCCP_UNSPEC,
CTA_TIMEOUT_DCCP_REQUEST,
CTA_TIMEOUT_DCCP_RESPOND,
CTA_TIMEOUT_DCCP_PARTOPEN,
CTA_TIMEOUT_DCCP_OPEN,
CTA_TIMEOUT_DCCP_CLOSEREQ,
CTA_TIMEOUT_DCCP_CLOSING,
CTA_TIMEOUT_DCCP_TIMEWAIT,
__CTA_TIMEOUT_DCCP_MAX
};
#define CTA_TIMEOUT_DCCP_MAX (__CTA_TIMEOUT_DCCP_MAX - 1)
enum ctattr_timeout_sctp {
CTA_TIMEOUT_SCTP_UNSPEC,
CTA_TIMEOUT_SCTP_CLOSED,
CTA_TIMEOUT_SCTP_COOKIE_WAIT,
CTA_TIMEOUT_SCTP_COOKIE_ECHOED,
CTA_TIMEOUT_SCTP_ESTABLISHED,
CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
enum ctattr_timeout_icmpv6 {
CTA_TIMEOUT_ICMPV6_UNSPEC,
CTA_TIMEOUT_ICMPV6_TIMEOUT,
__CTA_TIMEOUT_ICMPV6_MAX
};
#define CTA_TIMEOUT_ICMPV6_MAX (__CTA_TIMEOUT_ICMPV6_MAX - 1)
enum ctattr_timeout_gre {
CTA_TIMEOUT_GRE_UNSPEC,
CTA_TIMEOUT_GRE_UNREPLIED,
CTA_TIMEOUT_GRE_REPLIED,
__CTA_TIMEOUT_GRE_MAX
};
#define CTA_TIMEOUT_GRE_MAX (__CTA_TIMEOUT_GRE_MAX - 1)
#define CTNL_TIMEOUT_NAME_MAX 32
#endif
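To make the new attribute layout concrete, here is a hypothetical userspace sketch (assumed code, not part of this patch) that builds an IPCTNL_MSG_TIMEOUT_NEW request with libmnl; the policy name "tcp-policy" and the timeout values are made up, and socket setup, send and ACK handling are omitted.
/* Illustrative only: create a TCP timeout policy via nfnetlink_cttimeout. */
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

static struct nlmsghdr *build_timeout_new(char *buf)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nfgenmsg *nfg;
	struct nlattr *nest;

	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) |
			  IPCTNL_MSG_TIMEOUT_NEW;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	/* Policy name, L3/L4 protocol and per-state timeouts in seconds,
	 * big endian, matching what the kernel parses from CTA_TIMEOUT_*. */
	mnl_attr_put_strz(nlh, CTA_TIMEOUT_NAME, "tcp-policy");
	mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
	mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_TCP);

	nest = mnl_attr_nest_start(nlh, CTA_TIMEOUT_DATA);
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_TCP_ESTABLISHED, htonl(1000));
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_TCP_CLOSE, htonl(10));
	mnl_attr_nest_end(nlh, nest);

	return nlh;
}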
......@@ -16,4 +16,16 @@ struct xt_ct_target_info {
struct nf_conn *ct __attribute__((aligned(8)));
};
struct xt_ct_target_info_v1 {
__u16 flags;
__u16 zone;
__u32 ct_events;
__u32 exp_events;
char helper[16];
char timeout[32];
/* Used internally by the kernel */
struct nf_conn *ct __attribute__((aligned(8)));
};
#endif /* _XT_CT_H */
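The new timeout[32] field is what ties the CT target to the nfnetlink_cttimeout policies above. A hypothetical example (assumed values, not part of this patch) of the revision-1 target info as a rule along the lines of "-j CT --timeout tcp-policy" would pass it in:
/* Illustrative only: CT target info referring to a cttimeout policy by name. */
static const struct xt_ct_target_info_v1 example_ct_info = {
	.flags   = 0,
	.zone    = 0,
	.timeout = "tcp-policy",	/* name registered via nfnetlink_cttimeout */
};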
#ifndef _XT_LOG_H
#define _XT_LOG_H
/* make sure not to change this without changing nf_log.h:NF_LOG_* (!) */
#define XT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
#define XT_LOG_TCPOPT 0x02 /* Log TCP options */
#define XT_LOG_IPOPT 0x04 /* Log IP options */
#define XT_LOG_UID 0x08 /* Log UID owning local socket */
#define XT_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
#define XT_LOG_MACDECODE 0x20 /* Decode MAC header */
#define XT_LOG_MASK 0x2f
struct xt_log_info {
unsigned char level;
unsigned char logflags;
char prefix[30];
};
#endif /* _XT_LOG_H */
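For orientation, a hypothetical example (assumed values, not part of this patch) of how these flags end up in the target info consumed by the shared xt_LOG target, e.g. for a rule that logs TCP/IP options at syslog level 4:
/* Illustrative only: xt_LOG target info with a prefix and some log flags. */
static const struct xt_log_info example_log_info = {
	.level    = 4,					/* syslog severity */
	.logflags = XT_LOG_TCPSEQ | XT_LOG_TCPOPT | XT_LOG_IPOPT,
	.prefix   = "xt-log-example: ",
};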
......@@ -4,11 +4,9 @@ header-y += ipt_CLUSTERIP.h
header-y += ipt_ECN.h
header-y += ipt_LOG.h
header-y += ipt_REJECT.h
header-y += ipt_SAME.h
header-y += ipt_TTL.h
header-y += ipt_ULOG.h
header-y += ipt_addrtype.h
header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_realm.h
header-y += ipt_ttl.h
#ifndef _IPT_LOG_H
#define _IPT_LOG_H
#warning "Please update iptables, this file will be removed soon!"
/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */
#define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
#define IPT_LOG_TCPOPT 0x02 /* Log TCP options */
......
#ifndef _IPT_SAME_H
#define _IPT_SAME_H
#include <linux/types.h>
#define IPT_SAME_MAX_RANGE 10
#define IPT_SAME_NODST 0x01
struct ipt_same_info {
unsigned char info;
__u32 rangesize;
__u32 ipnum;
__u32 *iparray;
/* hangs off end. */
struct nf_nat_range range[IPT_SAME_MAX_RANGE];
};
#endif /*_IPT_SAME_H*/
#ifndef _IPT_REALM_H
#define _IPT_REALM_H
#include <linux/netfilter/xt_realm.h>
#define ipt_realm_info xt_realm_info
#endif /* _IPT_REALM_H */
#ifndef _IP6T_LOG_H
#define _IP6T_LOG_H
#warning "Please update iptables, this file will be removed soon!"
/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */
#define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
#define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */
......
......@@ -19,6 +19,9 @@ enum nf_ct_ext_id {
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
NF_CT_EXT_TSTAMP,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
NF_CT_EXT_TIMEOUT,
#endif
NF_CT_EXT_NUM,
};
......@@ -29,6 +32,7 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
......
......@@ -69,4 +69,17 @@ extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int timeout);
struct nf_ct_helper_expectfn {
struct list_head head;
const char *name;
void (*expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp);
};
void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n);
void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n);
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_name(const char *name);
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
#endif /*_NF_CONNTRACK_HELPER_H*/
......@@ -39,12 +39,13 @@ struct nf_conntrack_l4proto {
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum);
unsigned int hooknum,
unsigned int *timeouts);
/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff);
unsigned int dataoff, unsigned int *timeouts);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
......@@ -60,6 +61,9 @@ struct nf_conntrack_l4proto {
/* Print out the private part of the conntrack. */
int (*print_conntrack)(struct seq_file *s, struct nf_conn *);
/* Return the array of timeouts for this protocol. */
unsigned int *(*get_timeouts)(struct net *net);
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
......@@ -79,6 +83,17 @@ struct nf_conntrack_l4proto {
size_t nla_size;
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
size_t obj_size;
int (*nlattr_to_obj)(struct nlattr *tb[], void *data);
int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
unsigned int nlattr_max;
const struct nla_policy *nla_policy;
} ctnl_timeout;
#endif
#ifdef CONFIG_SYSCTL
struct ctl_table_header **ctl_table_header;
struct ctl_table *ctl_table;
......
#ifndef _NF_CONNTRACK_TIMEOUT_H
#define _NF_CONNTRACK_TIMEOUT_H
#include <net/net_namespace.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#define CTNL_TIMEOUT_NAME_MAX 32
struct ctnl_timeout {
struct list_head head;
struct rcu_head rcu_head;
atomic_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
__u16 l3num;
__u8 l4num;
char data[0];
};
struct nf_conn_timeout {
struct ctnl_timeout *timeout;
};
#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data)
static inline
struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT);
#else
return NULL;
#endif
}
static inline
struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
struct ctnl_timeout *timeout,
gfp_t gfp)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
if (timeout_ext == NULL)
return NULL;
timeout_ext->timeout = timeout;
return timeout_ext;
#else
return NULL;
#endif
};
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
extern int nf_conntrack_timeout_init(struct net *net);
extern void nf_conntrack_timeout_fini(struct net *net);
#else
static inline int nf_conntrack_timeout_init(struct net *net)
{
return 0;
}
static inline void nf_conntrack_timeout_fini(struct net *net)
{
return;
}
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(const char *name);
extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
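A minimal sketch (the helper name is made up; the actual wiring lives in the conntrack core and the per-protocol handlers) of how the new extension is meant to combine with the ->get_timeouts() hook introduced in nf_conntrack_l4proto:
/* Illustrative only: prefer a policy attached via the CT target, otherwise
 * fall back to the per-protocol default timeout array. */
static unsigned int *example_pick_timeouts(struct net *net,
					   struct nf_conn *ct,
					   struct nf_conntrack_l4proto *l4proto)
{
	struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);

	if (timeout_ext != NULL)
		return NF_CT_TIMEOUT_EXT_DATA(timeout_ext);

	return l4proto->get_timeouts(net);
}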
......@@ -6,7 +6,7 @@ struct sbuff {
};
static struct sbuff emergency, *emergency_ptr = &emergency;
static int sb_add(struct sbuff *m, const char *f, ...)
static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...)
{
va_list args;
int len;
......
......@@ -123,15 +123,6 @@ config IP_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_LOG
tristate "LOG target support"
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in
any iptables table which records the packet header to the syslog.
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_ULOG
tristate "ULOG target support"
default m if NETFILTER_ADVANCED=n
......
......@@ -54,7 +54,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
......
......@@ -75,25 +75,31 @@ static int icmp_print_tuple(struct seq_file *s,
ntohs(tuple->src.u.icmp.id));
}
static unsigned int *icmp_get_timeouts(struct net *net)
{
return &nf_ct_icmp_timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
static int icmp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeout)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout);
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,
......@@ -263,6 +269,44 @@ static int icmp_nlattr_tuple_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeout = data;
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
} else {
/* Set default ICMP timeout. */
*timeout = nf_ct_icmp_timeout;
}
return 0;
}
static int
icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
[CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *icmp_sysctl_header;
static struct ctl_table icmp_sysctl_table[] = {
......@@ -298,6 +342,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.invert_tuple = icmp_invert_tuple,
.print_tuple = icmp_print_tuple,
.packet = icmp_packet,
.get_timeouts = icmp_get_timeouts,
.new = icmp_new,
.error = icmp_error,
.destroy = NULL,
......@@ -308,6 +353,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = icmp_timeout_nlattr_to_obj,
.obj_to_nlattr = icmp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_ICMP_MAX,
.obj_size = sizeof(unsigned int),
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &icmp_sysctl_header,
.ctl_table = icmp_sysctl_table,
......
......@@ -686,6 +686,11 @@ static struct pernet_operations nf_nat_net_ops = {
.exit = nf_nat_net_exit,
};
static struct nf_ct_helper_expectfn follow_master_nat = {
.name = "nat-follow-master",
.expectfn = nf_nat_follow_master,
};
static int __init nf_nat_init(void)
{
size_t i;
......@@ -717,6 +722,8 @@ static int __init nf_nat_init(void)
l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
nf_ct_helper_expectfn_register(&follow_master_nat);
BUG_ON(nf_nat_seq_adjust_hook != NULL);
RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
......@@ -736,6 +743,7 @@ static void __exit nf_nat_cleanup(void)
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_l3proto_put(l3proto);
nf_ct_extend_unregister(&nat_extend);
nf_ct_helper_expectfn_unregister(&follow_master_nat);
RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
......
......@@ -568,6 +568,16 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
static struct nf_ct_helper_expectfn q931_nat = {
.name = "Q.931",
.expectfn = ip_nat_q931_expect,
};
static struct nf_ct_helper_expectfn callforwarding_nat = {
.name = "callforwarding",
.expectfn = ip_nat_callforwarding_expect,
};
/****************************************************************************/
static int __init init(void)
{
......@@ -590,6 +600,8 @@ static int __init init(void)
RCU_INIT_POINTER(nat_h245_hook, nat_h245);
RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
RCU_INIT_POINTER(nat_q931_hook, nat_q931);
nf_ct_helper_expectfn_register(&q931_nat);
nf_ct_helper_expectfn_register(&callforwarding_nat);
return 0;
}
......@@ -605,6 +617,8 @@ static void __exit fini(void)
RCU_INIT_POINTER(nat_h245_hook, NULL);
RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
RCU_INIT_POINTER(nat_q931_hook, NULL);
nf_ct_helper_expectfn_unregister(&q931_nat);
nf_ct_helper_expectfn_unregister(&callforwarding_nat);
synchronize_rcu();
}
......
......@@ -526,6 +526,11 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
return NF_DROP;
}
static struct nf_ct_helper_expectfn sip_nat = {
.name = "sip",
.expectfn = ip_nat_sip_expected,
};
static void __exit nf_nat_sip_fini(void)
{
RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
......@@ -535,6 +540,7 @@ static void __exit nf_nat_sip_fini(void)
RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
nf_ct_helper_expectfn_unregister(&sip_nat);
synchronize_rcu();
}
......@@ -554,6 +560,7 @@ static int __init nf_nat_sip_init(void)
RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
nf_ct_helper_expectfn_register(&sip_nat);
return 0;
}
......
......@@ -154,15 +154,6 @@ config IP6_NF_TARGET_HL
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_TARGET_HL.
config IP6_NF_TARGET_LOG
tristate "LOG target support"
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in
any iptables table which records the packet header to the syslog.
To compile it as a module, choose M here. If unsure, say N.
config IP6_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
......
......@@ -31,5 +31,4 @@ obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
# targets
obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
/*
* This is a module which is used for logging packets.
*/
/* (C) 2001 Jan Rekorajski <baggins@pld.org.pl>
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/spinlock.h>
#include <linux/icmpv6.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/xt_log.h>
MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
MODULE_DESCRIPTION("Xtables: IPv6 packet logging to syslog");
MODULE_LICENSE("GPL");
struct in_device;
#include <net/route.h>
#include <linux/netfilter_ipv6/ip6t_LOG.h>
/* One level of recursion won't kill us */
static void dump_packet(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff,
int recurse)
{
u_int8_t currenthdr;
int fragment;
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int logflags;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_MASK;
ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
if (ih == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
(ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
ih->hop_limit,
(ntohl(*(__be32 *)ih) & 0x000fffff));
fragment = 0;
ptr = ip6hoff + sizeof(struct ipv6hdr);
currenthdr = ih->nexthdr;
while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
struct ipv6_opt_hdr _hdr;
const struct ipv6_opt_hdr *hp;
hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
if (hp == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 48 "OPT (...) " */
if (logflags & IP6T_LOG_IPOPT)
sb_add(m, "OPT ( ");
switch (currenthdr) {
case IPPROTO_FRAGMENT: {
struct frag_hdr _fhdr;
const struct frag_hdr *fh;
sb_add(m, "FRAG:");
fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
&_fhdr);
if (fh == NULL) {
sb_add(m, "TRUNCATED ");
return;
}
/* Max length: 6 "65535 " */
sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
if (fh->frag_off & htons(0x0001))
sb_add(m, "INCOMPLETE ");
sb_add(m, "ID:%08x ", ntohl(fh->identification));
if (ntohs(fh->frag_off) & 0xFFF8)
fragment = 1;
hdrlen = 8;
break;
}
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
if (fragment) {
if (logflags & IP6T_LOG_IPOPT)
sb_add(m, ")");
return;
}
hdrlen = ipv6_optlen(hp);
break;
/* Max Length */
case IPPROTO_AH:
if (logflags & IP6T_LOG_IPOPT) {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
/* Max length: 3 "AH " */
sb_add(m, "AH ");
if (fragment) {
sb_add(m, ")");
return;
}
ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
&_ahdr);
if (ah == NULL) {
/*
* Max length: 26 "INCOMPLETE [65535
* bytes] )"
*/
sb_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 15 "SPI=0xF1234567 */
sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
}
hdrlen = (hp->hdrlen+2)<<2;
break;
case IPPROTO_ESP:
if (logflags & IP6T_LOG_IPOPT) {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 4 "ESP " */
sb_add(m, "ESP ");
if (fragment) {
sb_add(m, ")");
return;
}
/*
* Max length: 26 "INCOMPLETE [65535 bytes] )"
*/
eh = skb_header_pointer(skb, ptr, sizeof(_esph),
&_esph);
if (eh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 16 "SPI=0xF1234567 )" */
sb_add(m, "SPI=0x%x )", ntohl(eh->spi) );
}
return;
default:
/* Max length: 20 "Unknown Ext Hdr 255" */
sb_add(m, "Unknown Ext Hdr %u", currenthdr);
return;
}
if (logflags & IP6T_LOG_IPOPT)
sb_add(m, ") ");
currenthdr = hp->nexthdr;
ptr += hdrlen;
}
switch (currenthdr) {
case IPPROTO_TCP: {
struct tcphdr _tcph;
const struct tcphdr *th;
/* Max length: 10 "PROTO=TCP " */
sb_add(m, "PROTO=TCP ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
th = skb_header_pointer(skb, ptr, sizeof(_tcph), &_tcph);
if (th == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
return;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u ",
ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
if (logflags & IP6T_LOG_TCPSEQ)
sb_add(m, "SEQ=%u ACK=%u ",
ntohl(th->seq), ntohl(th->ack_seq));
/* Max length: 13 "WINDOW=65535 " */
sb_add(m, "WINDOW=%u ", ntohs(th->window));
/* Max length: 9 "RES=0x3C " */
sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
if (th->cwr)
sb_add(m, "CWR ");
if (th->ece)
sb_add(m, "ECE ");
if (th->urg)
sb_add(m, "URG ");
if (th->ack)
sb_add(m, "ACK ");
if (th->psh)
sb_add(m, "PSH ");
if (th->rst)
sb_add(m, "RST ");
if (th->syn)
sb_add(m, "SYN ");
if (th->fin)
sb_add(m, "FIN ");
/* Max length: 11 "URGP=65535 " */
sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
if ((logflags & IP6T_LOG_TCPOPT) &&
th->doff * 4 > sizeof(struct tcphdr)) {
u_int8_t _opt[60 - sizeof(struct tcphdr)];
const u_int8_t *op;
unsigned int i;
unsigned int optsize = th->doff * 4
- sizeof(struct tcphdr);
op = skb_header_pointer(skb,
ptr + sizeof(struct tcphdr),
optsize, _opt);
if (op == NULL) {
sb_add(m, "OPT (TRUNCATED)");
return;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
sb_add(m, "OPT (");
for (i =0; i < optsize; i++)
sb_add(m, "%02X", op[i]);
sb_add(m, ") ");
}
break;
}
case IPPROTO_UDP:
case IPPROTO_UDPLITE: {
struct udphdr _udph;
const struct udphdr *uh;
if (currenthdr == IPPROTO_UDP)
/* Max length: 10 "PROTO=UDP " */
sb_add(m, "PROTO=UDP " );
else /* Max length: 14 "PROTO=UDPLITE " */
sb_add(m, "PROTO=UDPLITE ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
uh = skb_header_pointer(skb, ptr, sizeof(_udph), &_udph);
if (uh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
return;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u LEN=%u ",
ntohs(uh->source), ntohs(uh->dest),
ntohs(uh->len));
break;
}
case IPPROTO_ICMPV6: {
struct icmp6hdr _icmp6h;
const struct icmp6hdr *ic;
/* Max length: 13 "PROTO=ICMPv6 " */
sb_add(m, "PROTO=ICMPv6 ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
if (ic == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
return;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
switch (ic->icmp6_type) {
case ICMPV6_ECHO_REQUEST:
case ICMPV6_ECHO_REPLY:
/* Max length: 19 "ID=65535 SEQ=65535 " */
sb_add(m, "ID=%u SEQ=%u ",
ntohs(ic->icmp6_identifier),
ntohs(ic->icmp6_sequence));
break;
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
break;
case ICMPV6_PARAMPROB:
/* Max length: 17 "POINTER=ffffffff " */
sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
/* Fall through */
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
/* Max length: 3+maxlen */
if (recurse) {
sb_add(m, "[");
dump_packet(m, info, skb,
ptr + sizeof(_icmp6h), 0);
sb_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
}
break;
}
/* Max length: 10 "PROTO=255 " */
default:
sb_add(m, "PROTO=%u ", currenthdr);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
sb_add(m, "UID=%u GID=%u ",
skb->sk->sk_socket->file->f_cred->fsuid,
skb->sk->sk_socket->file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
}
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!recurse && skb->mark)
sb_add(m, "MARK=0x%x ", skb->mark);
}
static void dump_mac_header(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & IP6T_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
sb_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int len = dev->hard_header_len;
unsigned int i;
if (dev->type == ARPHRD_SIT &&
(p -= ETH_HLEN) < skb->head)
p = NULL;
if (p != NULL) {
sb_add(m, "%02x", *p++);
for (i = 1; i < len; i++)
sb_add(m, ":%02x", *p++);
}
sb_add(m, " ");
if (dev->type == ARPHRD_SIT) {
const struct iphdr *iph =
(struct iphdr *)skb_mac_header(skb);
sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
}
} else
sb_add(m, " ");
}
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
static void
ip6t_log_packet(u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct sbuff *m = sb_open();
if (!loginfo)
loginfo = &default_loginfo;
sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
prefix,
in ? in->name : "",
out ? out->name : "");
if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
sb_close(m);
}
static unsigned int
log_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out,
&li, loginfo->prefix);
return XT_CONTINUE;
}
static int log_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_log_info *loginfo = par->targinfo;
if (loginfo->level >= 8) {
pr_debug("level %u >= 8\n", loginfo->level);
return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
pr_debug("prefix not null-terminated\n");
return -EINVAL;
}
return 0;
}
static struct xt_target log_tg6_reg __read_mostly = {
.name = "LOG",
.family = NFPROTO_IPV6,
.target = log_tg6,
.targetsize = sizeof(struct ip6t_log_info),
.checkentry = log_tg6_check,
.me = THIS_MODULE,
};
static struct nf_logger ip6t_logger __read_mostly = {
.name = "ip6t_LOG",
.logfn = &ip6t_log_packet,
.me = THIS_MODULE,
};
static int __init log_tg6_init(void)
{
int ret;
ret = xt_register_target(&log_tg6_reg);
if (ret < 0)
return ret;
nf_log_register(NFPROTO_IPV6, &ip6t_logger);
return 0;
}
static void __exit log_tg6_exit(void)
{
nf_log_unregister(&ip6t_logger);
xt_unregister_target(&log_tg6_reg);
}
module_init(log_tg6_init);
module_exit(log_tg6_exit);
......@@ -88,25 +88,31 @@ static int icmpv6_print_tuple(struct seq_file *s,
ntohs(tuple->src.u.icmp.id));
}
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
return &nf_ct_icmpv6_timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
static int icmpv6_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeout)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
static const u_int8_t valid_new[] = {
[ICMPV6_ECHO_REQUEST - 128] = 1,
......@@ -270,6 +276,44 @@ static int icmpv6_nlattr_tuple_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeout = data;
if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
} else {
/* Set default ICMPv6 timeout. */
*timeout = nf_ct_icmpv6_timeout;
}
return 0;
}
static int
icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
[CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *icmpv6_sysctl_header;
static struct ctl_table icmpv6_sysctl_table[] = {
......@@ -293,6 +337,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
.invert_tuple = icmpv6_invert_tuple,
.print_tuple = icmpv6_print_tuple,
.packet = icmpv6_packet,
.get_timeouts = icmpv6_get_timeouts,
.new = icmpv6_new,
.error = icmpv6_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
......@@ -301,6 +346,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
.obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_ICMP_MAX,
.obj_size = sizeof(unsigned int),
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &icmpv6_sysctl_header,
.ctl_table = icmpv6_sysctl_table,
......
......@@ -103,6 +103,16 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
config NF_CONNTRACK_TIMEOUT
bool 'Connection tracking timeout'
depends on NETFILTER_ADVANCED
help
This option enables support for the connection tracking timeout
extension. This allows you to attach timeout policies to flows
via the CT target.
If unsure, say `N'.
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
......@@ -314,6 +324,17 @@ config NF_CT_NETLINK
help
This option enables support for a netlink-based userspace interface
config NF_CT_NETLINK_TIMEOUT
tristate 'Connection tracking timeout tuning via Netlink'
select NETFILTER_NETLINK
depends on NETFILTER_ADVANCED
help
This option enables support for fine-grained tuning of the
connection tracking timeouts. This allows you to attach specific timeout
policies to flows, instead of using the global timeout policy.
If unsure, say `N'.
endif # NF_CONNTRACK
# transparent proxy support
......@@ -524,6 +545,15 @@ config NETFILTER_XT_TARGET_LED
For more information on the LEDs available on your system, see
Documentation/leds/leds-class.txt
config NETFILTER_XT_TARGET_LOG
tristate "LOG target support"
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in
any iptables table that record the packet header to the syslog.
To compile it as a module, choose M here. If unsure, say N.
config NETFILTER_XT_TARGET_MARK
tristate '"MARK" target support'
depends on NETFILTER_ADVANCED
......
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
......@@ -22,6 +23,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
# connection tracking helpers
nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
......@@ -58,6 +60,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
......
......@@ -442,7 +442,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = AF_INET;
set->family = NFPROTO_IPV4;
return true;
}
......@@ -550,7 +550,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = AF_INET,
.family = NFPROTO_IPV4,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_ip_create,
......
......@@ -543,7 +543,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = AF_INET;
set->family = NFPROTO_IPV4;
return true;
}
......@@ -623,7 +623,7 @@ static struct ip_set_type bitmap_ipmac_type = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
.family = AF_INET,
.family = NFPROTO_IPV4,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_ipmac_create,
......
......@@ -422,7 +422,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = AF_UNSPEC;
set->family = NFPROTO_UNSPEC;
return true;
}
......@@ -483,7 +483,7 @@ static struct ip_set_type bitmap_port_type = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_PORT,
.dimension = IPSET_DIM_ONE,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_port_create,
......
......@@ -69,7 +69,7 @@ find_set_type(const char *name, u8 family, u8 revision)
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name) &&
(type->family == family || type->family == AF_UNSPEC) &&
(type->family == family || type->family == NFPROTO_UNSPEC) &&
revision >= type->revision_min &&
revision <= type->revision_max)
return type;
......@@ -149,7 +149,7 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
rcu_read_lock();
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name) &&
(type->family == family || type->family == AF_UNSPEC)) {
(type->family == family || type->family == NFPROTO_UNSPEC)) {
found = true;
if (type->revision_min < *min)
*min = type->revision_min;
......@@ -164,8 +164,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
__find_set_type_minmax(name, family, min, max, true);
}
#define family_name(f) ((f) == AF_INET ? "inet" : \
(f) == AF_INET6 ? "inet6" : "any")
#define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
(f) == NFPROTO_IPV6 ? "inet6" : "any")
/* Register a set type structure. The type is identified by
* the unique triple of name, family and revision.
......@@ -354,7 +354,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
!(opt->family == set->family || set->family == AF_UNSPEC))
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
read_lock_bh(&set->lock);
......@@ -387,7 +387,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
!(opt->family == set->family || set->family == AF_UNSPEC))
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
write_lock_bh(&set->lock);
......@@ -410,7 +410,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
!(opt->family == set->family || set->family == AF_UNSPEC))
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
write_lock_bh(&set->lock);
......@@ -575,7 +575,7 @@ start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
return NULL;
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = AF_INET;
nfmsg->nfgen_family = NFPROTO_IPV4;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
......
......@@ -136,10 +136,10 @@ ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
u8 proto;
switch (pf) {
case AF_INET:
case NFPROTO_IPV4:
ret = ip_set_get_ip4_port(skb, src, port, &proto);
break;
case AF_INET6:
case NFPROTO_IPV6:
ret = ip_set_get_ip6_port(skb, src, port, &proto);
break;
default:
......
......@@ -366,11 +366,11 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u8 netmask, hbits;
struct ip_set_hash *h;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
netmask = set->family == AF_INET ? 32 : 128;
netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
pr_debug("Create set %s with family %s\n",
set->name, set->family == AF_INET ? "inet" : "inet6");
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
......@@ -389,8 +389,8 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
if ((set->family == AF_INET && netmask > 32) ||
(set->family == AF_INET6 && netmask > 128) ||
if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
(set->family == NFPROTO_IPV6 && netmask > 128) ||
netmask == 0)
return -IPSET_ERR_INVALID_NETMASK;
}
......@@ -419,15 +419,15 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_tvariant : &hash_ip6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_ip4_gc_init(set);
else
hash_ip6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_variant : &hash_ip6_variant;
}
......@@ -443,7 +443,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = hash_ip_create,
......
......@@ -450,7 +450,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
......@@ -490,15 +490,15 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_ipport4_gc_init(set);
else
hash_ipport6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_variant : &hash_ipport6_variant;
}
......@@ -514,7 +514,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* SCTP and UDPLITE support added */
.create = hash_ipport_create,
......
......@@ -468,7 +468,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
......@@ -508,15 +508,15 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_ipportip4_gc_init(set);
else
hash_ipportip6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_variant : &hash_ipportip6_variant;
}
......@@ -532,7 +532,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* SCTP and UDPLITE support added */
.create = hash_ipportip_create,
......
......@@ -41,12 +41,19 @@ hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
/* The type variant functions: IPv4 */
/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
* However this way we have to store internally cidr - 1,
* dancing back and forth.
*/
#define IP_SET_HASH_WITH_NETS_PACKED
/* Member elements without timeout */
struct hash_ipportnet4_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
};
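Because cidr == 0 is rejected (see the comment above), cidr - 1 always fits in the 7-bit field and the freed bit carries the new "nomatch" flag. A standalone illustration of the packing and of the +1 that every dump/netmask helper below has to apply:

#include <stdio.h>

struct packed_net {
        unsigned char cidr:7;          /* stores cidr - 1, so /1../128 fit */
        unsigned char nomatch:1;
};

int main(void)
{
        struct packed_net e = { .cidr = 128 - 1, .nomatch = 1 };

        /* Convert back to the user-visible prefix length when listing. */
        printf("stored %u -> reported /%u, nomatch=%u\n",
               (unsigned)e.cidr, (unsigned)e.cidr + 1, (unsigned)e.nomatch);
        return 0;
}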
......@@ -55,7 +62,8 @@ struct hash_ipportnet4_telem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
......@@ -85,11 +93,23 @@ hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline bool
hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
{
return !elem->nomatch;
}
static inline void
hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
{
elem->ip2 &= ip_set_netmask(cidr);
elem->cidr = cidr;
elem->cidr = cidr - 1;
}
static inline void
......@@ -102,11 +122,15 @@ static bool
hash_ipportnet4_data_list(struct sk_buff *skb,
const struct hash_ipportnet4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -119,14 +143,17 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
{
const struct hash_ipportnet4_telem *tdata =
(const struct hash_ipportnet4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
......@@ -158,13 +185,11 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
if (data.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
data.cidr = HOST_MASK;
data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
......@@ -172,7 +197,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
data.ip2 &= ip_set_netmask(data.cidr);
data.ip2 &= ip_set_netmask(data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
......@@ -183,17 +208,19 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
......@@ -208,9 +235,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
if (tb[IPSET_ATTR_CIDR2]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!data.cidr)
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1;
}
if (tb[IPSET_ATTR_PORT])
......@@ -236,12 +264,18 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16);
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
tb[IPSET_ATTR_IP2_TO])) {
data.ip = htonl(ip);
data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr));
data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1));
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
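The cadt_flags handling in the hunk above follows a convention shared by all of these set types: the lower half of the generic flags word already carries command-level flags (that is what ip_set_eexist() inspects), so set-type flags such as IPSET_FLAG_NOMATCH are parked in the upper 16 bits and, presumably, shifted back down by the generic add path before the type's data_flags() callback tests them. A tiny illustration of the split, with placeholder flag values:

#include <stdio.h>
#include <stdint.h>

#define CMD_FLAG_EXIST    0x0001u   /* placeholder command flag, lower half */
#define CADT_FLAG_NOMATCH 0x0004u   /* placeholder set-type flag */

int main(void)
{
        uint32_t cadt_flags = CADT_FLAG_NOMATCH;
        uint32_t flags = CMD_FLAG_EXIST;

        flags |= cadt_flags << 16;            /* park CADT flags in the upper half */

        printf("command flags: 0x%04x\n", flags & 0xffffu);
        printf("cadt flags:    0x%04x\n", flags >> 16);
        return 0;
}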
......@@ -275,7 +309,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip2_from + UINT_MAX == ip2_to)
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip2_from, ip2_to, data.cidr);
ip_set_mask_from_to(ip2_from, ip2_to, data.cidr + 1);
}
if (retried)
......@@ -290,7 +324,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
while (!after(ip2, ip2_to)) {
data.ip2 = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&data.cidr);
&cidr);
data.cidr = cidr - 1;
ret = adtfn(set, &data, timeout, flags);
if (ret && !ip_set_eexist(ret, flags))
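The loop in the hunk above covers an arbitrary ip2 range with a minimal sequence of CIDR blocks, calling adtfn() once per block. A self-contained sketch of that decomposition; range_to_cidr() here is an illustrative stand-in for ip_set_range_to_cidr(), not its kernel implementation:

#include <stdio.h>

/* Return the prefix length of the largest block that starts at 'from' and
 * stays inside [from, to]; store its last address in *last.
 * Addresses are host-order IPv4. */
static unsigned int range_to_cidr(unsigned int from, unsigned int to,
                                  unsigned int *last)
{
        unsigned int cidr = 32;
        unsigned int mask = 0xFFFFFFFFu;

        while (cidr > 0) {
                unsigned int wider = mask << 1;         /* try one bit shorter */

                if ((from & wider) != from ||           /* block not aligned */
                    (from | ~wider) > to)               /* block overshoots  */
                        break;
                mask = wider;
                cidr--;
        }
        *last = from | ~mask;
        return cidr;
}

int main(void)
{
        unsigned int from = 0xC0A80001u;                /* 192.168.0.1  */
        unsigned int to   = 0xC0A8000Au;                /* 192.168.0.10 */

        while (from <= to) {
                unsigned int last;
                unsigned int cidr = range_to_cidr(from, to, &last);

                printf("%u.%u.%u.%u/%u\n", from >> 24, (from >> 16) & 0xFF,
                       (from >> 8) & 0xFF, from & 0xFF, cidr);
                if (last == 0xFFFFFFFFu)
                        break;                          /* avoid wrap-around */
                from = last + 1;
        }
        return 0;
}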
......@@ -321,7 +356,8 @@ struct hash_ipportnet6_elem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
};
......@@ -329,7 +365,8 @@ struct hash_ipportnet6_telem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
......@@ -359,6 +396,18 @@ hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline bool
hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
{
return !elem->nomatch;
}
static inline void
hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
{
......@@ -378,18 +427,22 @@ static inline void
hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip2, cidr);
elem->cidr = cidr;
elem->cidr = cidr - 1;
}
static bool
hash_ipportnet6_data_list(struct sk_buff *skb,
const struct hash_ipportnet6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -402,14 +455,17 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
{
const struct hash_ipportnet6_telem *e =
(const struct hash_ipportnet6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -438,13 +494,11 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
if (data.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
data.cidr = HOST_MASK;
data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
......@@ -452,7 +506,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
ip6_netmask(&data.ip2, data.cidr);
ip6_netmask(&data.ip2, data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
......@@ -463,16 +517,18 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
struct hash_ipportnet6_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
......@@ -490,13 +546,14 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR2])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!data.cidr)
if (tb[IPSET_ATTR_CIDR2]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1;
}
ip6_netmask(&data.ip2, data.cidr);
ip6_netmask(&data.ip2, data.cidr + 1);
if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
......@@ -521,6 +578,12 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
......@@ -554,7 +617,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
......@@ -573,7 +636,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
......@@ -596,16 +659,16 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_tvariant
: &hash_ipportnet6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_ipportnet4_gc_init(set);
else
hash_ipportnet6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
}
......@@ -621,10 +684,11 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
/* 1 SCTP and UDPLITE support added */
.revision_max = 2, /* Range as input support for IPv4 added */
/* 2 Range as input support for IPv4 added */
.revision_max = 3, /* nomatch flag support added */
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
......@@ -643,6 +707,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
},
......
......@@ -43,7 +43,7 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
struct hash_net4_elem {
__be32 ip;
u16 padding0;
u8 padding1;
u8 nomatch;
u8 cidr;
};
......@@ -51,7 +51,7 @@ struct hash_net4_elem {
struct hash_net4_telem {
__be32 ip;
u16 padding0;
u8 padding1;
u8 nomatch;
u8 cidr;
unsigned long timeout;
};
......@@ -61,7 +61,8 @@ hash_net4_data_equal(const struct hash_net4_elem *ip1,
const struct hash_net4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
return ip1->ip == ip2->ip &&
ip1->cidr == ip2->cidr;
}
static inline bool
......@@ -76,6 +77,19 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
{
dst->ip = src->ip;
dst->cidr = src->cidr;
dst->nomatch = src->nomatch;
}
static inline void
hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
{
dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline bool
hash_net4_data_match(const struct hash_net4_elem *elem)
{
return !elem->nomatch;
}
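Note the difference from the packed types above: hash:net keeps nomatch in a whole byte, so storing the raw masked value is fine because only its truthiness is ever tested, whereas the bitfield variants must normalise with !! or the flag bit would be truncated away. A short demonstration (the flag value is illustrative):

#include <stdio.h>

#define FLAG_NOMATCH 0x04u                       /* illustrative bit value */

struct wide   { unsigned char nomatch;   };      /* full byte: raw value works */
struct packed { unsigned char nomatch:1; };      /* one bit: must be 0 or 1    */

int main(void)
{
        unsigned int flags = FLAG_NOMATCH;
        struct wide   w      = { .nomatch = flags & FLAG_NOMATCH };
        struct packed p_raw  = { .nomatch = flags & FLAG_NOMATCH };      /* truncates to 0 */
        struct packed p_norm = { .nomatch = !!(flags & FLAG_NOMATCH) };  /* stays 1        */

        printf("wide=%u packed_raw=%u packed_norm=%u\n",
               (unsigned)w.nomatch, (unsigned)p_raw.nomatch, (unsigned)p_norm.nomatch);
        return 0;
}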
static inline void
......@@ -95,8 +109,12 @@ hash_net4_data_zero_out(struct hash_net4_elem *elem)
static bool
hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -108,11 +126,14 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
{
const struct hash_net4_telem *tdata =
(const struct hash_net4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
......@@ -167,7 +188,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
......@@ -179,7 +201,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
......@@ -189,6 +211,12 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr));
ret = adtfn(set, &data, timeout, flags);
......@@ -236,14 +264,14 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
struct hash_net6_elem {
union nf_inet_addr ip;
u16 padding0;
u8 padding1;
u8 nomatch;
u8 cidr;
};
struct hash_net6_telem {
union nf_inet_addr ip;
u16 padding0;
u8 padding1;
u8 nomatch;
u8 cidr;
unsigned long timeout;
};
......@@ -269,6 +297,19 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
{
dst->ip.in6 = src->ip.in6;
dst->cidr = src->cidr;
dst->nomatch = src->nomatch;
}
static inline void
hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
{
dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline bool
hash_net6_data_match(const struct hash_net6_elem *elem)
{
return !elem->nomatch;
}
static inline void
......@@ -296,8 +337,12 @@ hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
static bool
hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -309,11 +354,14 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
{
const struct hash_net6_telem *e =
(const struct hash_net6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -366,7 +414,8 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
......@@ -381,7 +430,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
......@@ -392,6 +441,12 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16);
}
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
......@@ -406,7 +461,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
......@@ -425,7 +480,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
......@@ -448,15 +503,15 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_net4_tvariant : &hash_net6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_net4_gc_init(set);
else
hash_net6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_net4_variant : &hash_net6_variant;
}
......@@ -472,9 +527,10 @@ static struct ip_set_type hash_net_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* Range as input support for IPv4 added */
/* = 1 Range as input support for IPv4 added */
.revision_max = 2, /* nomatch flag support added */
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
......@@ -488,6 +544,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
......
......@@ -163,7 +163,8 @@ struct hash_netiface4_elem_hashed {
__be32 ip;
u8 physdev;
u8 cidr;
u16 padding;
u8 nomatch;
u8 padding;
};
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
......@@ -173,7 +174,8 @@ struct hash_netiface4_elem {
__be32 ip;
u8 physdev;
u8 cidr;
u16 padding;
u8 nomatch;
u8 padding;
const char *iface;
};
......@@ -182,7 +184,8 @@ struct hash_netiface4_telem {
__be32 ip;
u8 physdev;
u8 cidr;
u16 padding;
u8 nomatch;
u8 padding;
const char *iface;
unsigned long timeout;
};
......@@ -207,11 +210,25 @@ hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
static inline void
hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
const struct hash_netiface4_elem *src) {
const struct hash_netiface4_elem *src)
{
dst->ip = src->ip;
dst->cidr = src->cidr;
dst->physdev = src->physdev;
dst->iface = src->iface;
dst->nomatch = src->nomatch;
}
static inline void
hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
{
dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline bool
hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
{
return !elem->nomatch;
}
static inline void
......@@ -233,11 +250,13 @@ hash_netiface4_data_list(struct sk_buff *skb,
{
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -252,11 +271,13 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
(const struct hash_netiface4_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
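Besides wiring up the nomatch bit, the hunks above convert the CADT flags to network byte order before putting them, matching the other NET32 attributes in the same functions. A host-side demonstration of why the raw value would be wrong on a little-endian machine:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint32_t flags = 0x00000004;             /* e.g. a single flag bit */
        uint32_t wire  = htonl(flags);
        const unsigned char *raw = (const unsigned char *)&flags;
        const unsigned char *net = (const unsigned char *)&wire;

        /* On little-endian hosts the unconverted bytes are reversed on the wire. */
        printf("host order: %02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]);
        printf("wire order: %02x %02x %02x %02x\n", net[0], net[1], net[2], net[3]);
        return 0;
}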
......@@ -361,7 +382,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
......@@ -387,6 +408,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1;
if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
......@@ -440,7 +463,8 @@ struct hash_netiface6_elem_hashed {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u16 padding;
u8 nomatch;
u8 padding;
};
#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
......@@ -449,7 +473,8 @@ struct hash_netiface6_elem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u16 padding;
u8 nomatch;
u8 padding;
const char *iface;
};
......@@ -457,7 +482,8 @@ struct hash_netiface6_telem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u16 padding;
u8 nomatch;
u8 padding;
const char *iface;
unsigned long timeout;
};
......@@ -487,9 +513,22 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
{
dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline bool
hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
{
return !elem->nomatch;
}
static inline void
hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
{
elem->cidr = 0;
}
static inline void
......@@ -514,11 +553,13 @@ hash_netiface6_data_list(struct sk_buff *skb,
{
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -533,11 +574,13 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
(const struct hash_netiface6_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
return 0;
......@@ -636,7 +679,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
......@@ -662,6 +705,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1;
if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
flags |= (cadt_flags << 16);
}
ret = adtfn(set, &data, timeout, flags);
......@@ -678,7 +723,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
......@@ -697,7 +742,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
......@@ -722,15 +767,15 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_netiface4_gc_init(set);
else
hash_netiface6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_variant : &hash_netiface6_variant;
}
......@@ -746,8 +791,9 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
.dimension = IPSET_DIM_TWO,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* nomatch flag support added */
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
......
......@@ -40,12 +40,19 @@ hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
/* The type variant functions: IPv4 */
/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
* However this way we have to store internally cidr - 1,
* dancing back and forth.
*/
#define IP_SET_HASH_WITH_NETS_PACKED
/* Member elements without timeout */
struct hash_netport4_elem {
__be32 ip;
__be16 port;
u8 proto;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
};
/* Member elements with timeout support */
......@@ -53,7 +60,8 @@ struct hash_netport4_telem {
__be32 ip;
__be16 port;
u8 proto;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
unsigned long timeout;
};
......@@ -82,13 +90,26 @@ hash_netport4_data_copy(struct hash_netport4_elem *dst,
dst->port = src->port;
dst->proto = src->proto;
dst->cidr = src->cidr;
dst->nomatch = src->nomatch;
}
static inline void
hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline bool
hash_netport4_data_match(const struct hash_netport4_elem *elem)
{
return !elem->nomatch;
}
static inline void
hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
elem->cidr = cidr;
elem->cidr = cidr - 1;
}
static inline void
......@@ -101,10 +122,14 @@ static bool
hash_netport4_data_list(struct sk_buff *skb,
const struct hash_netport4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -117,13 +142,16 @@ hash_netport4_data_tlist(struct sk_buff *skb,
{
const struct hash_netport4_telem *tdata =
(const struct hash_netport4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
......@@ -154,20 +182,18 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
if (data.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
data.cidr = HOST_MASK;
data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
data.ip &= ip_set_netmask(data.cidr);
data.ip &= ip_set_netmask(data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
......@@ -178,16 +204,18 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = { .cidr = HOST_MASK };
struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to, p = 0, ip = 0, ip_to, last;
u32 timeout = h->timeout;
bool with_ports = false;
u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
......@@ -198,9 +226,10 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1;
}
if (tb[IPSET_ATTR_PORT])
......@@ -227,8 +256,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr));
data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1));
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
......@@ -248,14 +284,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip, ip_to, data.cidr);
ip_set_mask_from_to(ip, ip_to, data.cidr + 1);
}
if (retried)
ip = h->next.ip;
while (!after(ip, ip_to)) {
data.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
last = ip_set_range_to_cidr(ip, ip_to, &cidr);
data.cidr = cidr - 1;
p = retried && ip == h->next.ip ? h->next.port : port;
for (; p <= port_to; p++) {
data.port = htons(p);
......@@ -288,14 +325,16 @@ struct hash_netport6_elem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
};
struct hash_netport6_telem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
u8 cidr;
u8 cidr:7;
u8 nomatch:1;
unsigned long timeout;
};
......@@ -323,6 +362,18 @@ hash_netport6_data_copy(struct hash_netport6_elem *dst,
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline bool
hash_netport6_data_match(const struct hash_netport6_elem *elem)
{
return !elem->nomatch;
}
static inline void
hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
{
......@@ -342,17 +393,21 @@ static inline void
hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
elem->cidr = cidr;
elem->cidr = cidr - 1;
}
static bool
hash_netport6_data_list(struct sk_buff *skb,
const struct hash_netport6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -365,13 +420,16 @@ hash_netport6_data_tlist(struct sk_buff *skb,
{
const struct hash_netport6_telem *e =
(const struct hash_netport6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
if (flags)
NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
......@@ -400,20 +458,18 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
};
if (data.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
data.cidr = HOST_MASK;
data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
ip6_netmask(&data.ip, data.cidr);
ip6_netmask(&data.ip, data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
......@@ -424,16 +480,18 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = { .cidr = HOST_MASK };
struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
......@@ -445,11 +503,13 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
data.cidr = cidr - 1;
}
ip6_netmask(&data.ip, data.cidr + 1);
if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
......@@ -474,6 +534,12 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
......@@ -507,7 +573,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
......@@ -526,7 +592,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
......@@ -549,15 +615,15 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_tvariant : &hash_netport6_tvariant;
if (set->family == AF_INET)
if (set->family == NFPROTO_IPV4)
hash_netport4_gc_init(set);
else
hash_netport6_gc_init(set);
} else {
set->variant = set->family == AF_INET
set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_variant : &hash_netport6_variant;
}
......@@ -573,10 +639,11 @@ static struct ip_set_type hash_netport_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
/* 1 SCTP and UDPLITE support added */
.revision_max = 2, /* Range as input support for IPv4 added */
/* 2, Range as input support for IPv4 added */
.revision_max = 3, /* nomatch flag support added */
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
......@@ -595,6 +662,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
......
......@@ -575,7 +575,7 @@ static struct ip_set_type list_set_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
.dimension = IPSET_DIM_ONE,
.family = AF_UNSPEC,
.family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = list_set_create,
......
......@@ -44,6 +44,7 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
......@@ -763,7 +764,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
unsigned int dataoff, u32 hash,
unsigned int *timeouts)
{
struct nf_conn *ct;
struct nf_conn_help *help;
......@@ -782,7 +784,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
if (!l4proto->new(ct, skb, dataoff)) {
if (!l4proto->new(ct, skb, dataoff, timeouts)) {
nf_conntrack_free(ct);
pr_debug("init conntrack: can't track with proto module\n");
return NULL;
......@@ -848,7 +850,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
int *set_reply,
enum ip_conntrack_info *ctinfo)
enum ip_conntrack_info *ctinfo,
unsigned int *timeouts)
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
......@@ -868,7 +871,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
skb, dataoff, hash);
skb, dataoff, hash, timeouts);
if (!h)
return NULL;
if (IS_ERR(h))
......@@ -909,6 +912,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
enum ip_conntrack_info ctinfo;
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
struct nf_conn_timeout *timeout_ext;
unsigned int *timeouts;
unsigned int dataoff;
u_int8_t protonum;
int set_reply = 0;
......@@ -955,8 +960,19 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
goto out;
}
/* Decide what timeout policy we want to apply to this flow. */
if (tmpl) {
timeout_ext = nf_ct_timeout_find(tmpl);
if (timeout_ext)
timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
else
timeouts = l4proto->get_timeouts(net);
} else
timeouts = l4proto->get_timeouts(net);
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
l3proto, l4proto, &set_reply, &ctinfo);
l3proto, l4proto, &set_reply, &ctinfo,
timeouts);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(net, invalid);
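The hunk above is the heart of the cttimeout plumbing: if the conntrack template carries a timeout extension, its per-flow policy is used, otherwise the protocol tracker's defaults from get_timeouts() apply, and the chosen array is handed down through resolve_normal_ct() and init_conntrack() to the protocol's new() and packet() handlers. A simplified userspace model of just that selection step (types and values are stand-ins, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct timeout_ext { unsigned int data[4]; };   /* stand-in for the attached policy */
struct tmpl        { struct timeout_ext *timeout_ext; };

static unsigned int proto_defaults[4] = { 30, 120, 432000, 120 };

/* Template policy wins; otherwise fall back to the per-protocol defaults. */
static unsigned int *pick_timeouts(const struct tmpl *tmpl)
{
        if (tmpl && tmpl->timeout_ext)
                return tmpl->timeout_ext->data;
        return proto_defaults;
}

int main(void)
{
        struct timeout_ext custom = { .data = { 5, 10, 60, 10 } };
        struct tmpl with_policy   = { .timeout_ext = &custom };

        printf("no template:   %u\n", pick_timeouts(NULL)[0]);          /* 30 */
        printf("with template: %u\n", pick_timeouts(&with_policy)[0]);  /* 5  */
        return 0;
}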
......@@ -973,7 +989,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
NF_CT_ASSERT(skb->nfct);
ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
......@@ -1327,6 +1343,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
}
nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
nf_conntrack_timeout_fini(net);
nf_conntrack_ecache_fini(net);
nf_conntrack_tstamp_fini(net);
nf_conntrack_acct_fini(net);
......@@ -1558,9 +1575,14 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_ecache_init(net);
if (ret < 0)
goto err_ecache;
ret = nf_conntrack_timeout_init(net);
if (ret < 0)
goto err_timeout;
return 0;
err_timeout:
nf_conntrack_timeout_fini(net);
err_ecache:
nf_conntrack_tstamp_fini(net);
err_tstamp:
......
......@@ -32,9 +32,11 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex);
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned long events;
unsigned long events, missed;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
struct nf_ct_event item;
int ret;
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
......@@ -47,31 +49,32 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
events = xchg(&e->cache, 0);
if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
struct nf_ct_event item = {
.ct = ct,
.pid = 0,
.report = 0
};
int ret;
if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
goto out_unlock;
/* We make a copy of the missed event cache without taking
 * the lock, thus we may send missed events twice. However,
 * this does no harm and it happens very rarely. */
unsigned long missed = e->missed;
missed = e->missed;
if (!((events | missed) & e->ctmask))
goto out_unlock;
item.ct = ct;
item.pid = 0;
item.report = 0;
ret = notify->fcn(events | missed, &item);
if (unlikely(ret < 0 || missed)) {
if (likely(ret >= 0 && !missed))
goto out_unlock;
spin_lock_bh(&ct->lock);
if (ret < 0)
e->missed |= events;
else
e->missed &= ~missed;
spin_unlock_bh(&ct->lock);
}
}
out_unlock:
rcu_read_unlock();
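The rewritten delivery path replaces the nested block with early exits, but the interesting part is the missed-event bookkeeping: the missed mask is snapshotted without the lock, delivered together with the fresh events, and then either grown (delivery failed) or pruned by exactly the snapshot that was resent (delivery succeeded). A lock-free userspace sketch of that bookkeeping; the kernel additionally filters by ctmask, checks the conntrack state and takes ct->lock around the updates:

#include <stdio.h>

struct ecache { unsigned long cache, missed; };

/* Notifier stand-in: return < 0 to simulate a failed delivery. */
static int deliver(unsigned long events)
{
        (void)events;
        return -1;
}

static void deliver_cached(struct ecache *e)
{
        unsigned long events = e->cache;        /* like xchg(&e->cache, 0) */
        unsigned long missed;

        e->cache = 0;
        if (!events)
                return;

        missed = e->missed;                     /* unlocked snapshot */
        if (deliver(events | missed) < 0)
                e->missed |= events;            /* retry these next time     */
        else
                e->missed &= ~missed;           /* drop what was just resent */
}

int main(void)
{
        struct ecache e = { .cache = 0x3, .missed = 0x4 };

        deliver_cached(&e);
        printf("missed after failed delivery: 0x%lx\n", e.missed);   /* 0x7 */
        return 0;
}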
......
......@@ -181,6 +181,60 @@ void nf_ct_helper_destroy(struct nf_conn *ct)
}
}
static LIST_HEAD(nf_ct_helper_expectfn_list);
void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
{
spin_lock_bh(&nf_conntrack_lock);
list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
{
spin_lock_bh(&nf_conntrack_lock);
list_del_rcu(&n->head);
spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_name(const char *name)
{
struct nf_ct_helper_expectfn *cur;
bool found = false;
rcu_read_lock();
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
if (!strcmp(cur->name, name)) {
found = true;
break;
}
}
rcu_read_unlock();
return found ? cur : NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
{
struct nf_ct_helper_expectfn *cur;
bool found = false;
rcu_read_lock();
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
if (cur->expectfn == symbol) {
found = true;
break;
}
}
rcu_read_unlock();
return found ? cur : NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
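The new expectfn registry is a plain name-keyed list used in both directions: ctnetlink resolves CTA_EXPECT_FN names to callbacks when creating an expectation, and callbacks back to names when dumping one. A userspace analogue of those two lookups (a singly linked list stands in for the RCU-protected kernel list):

#include <stdio.h>
#include <string.h>

typedef void (*expectfn_t)(void);

struct expectfn_entry {
        const char *name;
        expectfn_t fn;
        struct expectfn_entry *next;
};

static struct expectfn_entry *registry;

static void register_expectfn(struct expectfn_entry *e)
{
        e->next = registry;
        registry = e;
}

static expectfn_t find_by_name(const char *name)
{
        struct expectfn_entry *cur;

        for (cur = registry; cur; cur = cur->next)
                if (!strcmp(cur->name, name))
                        return cur->fn;
        return NULL;
}

static const char *find_by_symbol(expectfn_t fn)
{
        struct expectfn_entry *cur;

        for (cur = registry; cur; cur = cur->next)
                if (cur->fn == fn)
                        return cur->name;
        return NULL;
}

static void demo_callback(void) { }

int main(void)
{
        struct expectfn_entry e = { .name = "demo-nat", .fn = demo_callback };

        register_expectfn(&e);
        printf("name -> fn matches: %d\n", find_by_name("demo-nat") == demo_callback);
        printf("fn -> name:         %s\n", find_by_symbol(demo_callback));
        return 0;
}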
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
unsigned int h = helper_hash(&me->tuple);
......
......@@ -110,15 +110,16 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
if (unlikely(ret < 0))
return ret;
l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
if (ret >= 0) {
l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
}
rcu_read_unlock();
return ret;
}
......@@ -712,9 +713,11 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
int res;
#ifdef CONFIG_NF_CONNTRACK_MARK
const struct ctnetlink_dump_filter *filter = cb->data;
#endif
spin_lock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
......@@ -740,11 +743,14 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
continue;
}
#endif
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
rcu_read_lock();
res =
ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(
cb->nlh->nlmsg_type),
ct) < 0) {
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct);
rcu_read_unlock();
if (res < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
goto out;
......@@ -1649,14 +1655,16 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
if (!nest_parms)
goto nla_put_failure;
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
if (unlikely(ret < 0))
goto nla_put_failure;
l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
if (ret >= 0) {
l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
}
rcu_read_unlock();
if (unlikely(ret < 0))
goto nla_put_failure;
......@@ -1675,6 +1683,11 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
struct nf_conn *master = exp->master;
long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
struct nf_conn_help *help;
#ifdef CONFIG_NF_NAT_NEEDED
struct nlattr *nest_parms;
struct nf_conntrack_tuple nat_tuple = {};
#endif
struct nf_ct_helper_expectfn *expfn;
if (timeout < 0)
timeout = 0;
......@@ -1688,9 +1701,29 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
CTA_EXPECT_MASTER) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_NAT_NEEDED
if (exp->saved_ip || exp->saved_proto.all) {
nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
nat_tuple.src.l3num = nf_ct_l3num(master);
nat_tuple.src.u3.ip = exp->saved_ip;
nat_tuple.dst.protonum = nf_ct_protonum(master);
nat_tuple.src.u = exp->saved_proto;
if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
CTA_EXPECT_NAT_TUPLE) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
}
#endif
NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
help = nfct_help(master);
if (help) {
struct nf_conntrack_helper *helper;
......@@ -1699,6 +1732,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
if (helper)
NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
}
expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
if (expfn != NULL)
NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
return 0;
......@@ -1856,6 +1892,9 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
[CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
[CTA_EXPECT_ZONE] = { .type = NLA_U16 },
[CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
[CTA_EXPECT_CLASS] = { .type = NLA_U32 },
[CTA_EXPECT_NAT] = { .type = NLA_NESTED },
[CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
};
static int
......@@ -2031,6 +2070,41 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
return -EOPNOTSUPP;
}
static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
[CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
[CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
};
static int
ctnetlink_parse_expect_nat(const struct nlattr *attr,
struct nf_conntrack_expect *exp,
u_int8_t u3)
{
#ifdef CONFIG_NF_NAT_NEEDED
struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
struct nf_conntrack_tuple nat_tuple = {};
int err;
nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
return -EINVAL;
err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
&nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
if (err < 0)
return err;
exp->saved_ip = nat_tuple.src.u3.ip;
exp->saved_proto = nat_tuple.src.u;
exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
return 0;
#else
return -EOPNOTSUPP;
#endif
}
static int
ctnetlink_create_expect(struct net *net, u16 zone,
const struct nlattr * const cda[],
......@@ -2042,6 +2116,8 @@ ctnetlink_create_expect(struct net *net, u16 zone,
struct nf_conntrack_expect *exp;
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_helper *helper = NULL;
u_int32_t class = 0;
int err = 0;
/* caller guarantees that those three CTA_EXPECT_* exist */
......@@ -2060,6 +2136,40 @@ ctnetlink_create_expect(struct net *net, u16 zone,
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
/* Look for helper of this expectation */
if (cda[CTA_EXPECT_HELP_NAME]) {
const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper == NULL) {
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
err = -EOPNOTSUPP;
goto out;
}
helper = __nf_conntrack_helper_find(helpname,
nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper) {
err = -EAGAIN;
goto out;
}
#endif
err = -EOPNOTSUPP;
goto out;
}
}
if (cda[CTA_EXPECT_CLASS] && helper) {
class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
if (class > helper->expect_class_max) {
err = -EINVAL;
goto out;
}
}
exp = nf_ct_expect_alloc(ct);
if (!exp) {
err = -ENOMEM;
......@@ -2086,18 +2196,35 @@ ctnetlink_create_expect(struct net *net, u16 zone,
} else
exp->flags = 0;
}
if (cda[CTA_EXPECT_FN]) {
const char *name = nla_data(cda[CTA_EXPECT_FN]);
struct nf_ct_helper_expectfn *expfn;
exp->class = 0;
expfn = nf_ct_helper_expectfn_find_by_name(name);
if (expfn == NULL) {
err = -EINVAL;
goto err_out;
}
exp->expectfn = expfn->expectfn;
} else
exp->expectfn = NULL;
exp->class = class;
exp->master = ct;
exp->helper = NULL;
exp->helper = helper;
memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
exp->mask.src.u.all = mask.src.u.all;
if (cda[CTA_EXPECT_NAT]) {
err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
exp, u3);
if (err < 0)
goto err_out;
}
err = nf_ct_expect_related_report(exp, pid, report);
err_out:
nf_ct_expect_put(exp);
out:
nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
return err;
......
......@@ -423,7 +423,7 @@ static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
}
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
......@@ -472,12 +472,17 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
ntohl(dhack->dccph_ack_nr_low);
}
static unsigned int *dccp_get_timeouts(struct net *net)
{
return dccp_pernet(net)->dccp_timeout;
}
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
u_int8_t pf, unsigned int hooknum)
u_int8_t pf, unsigned int hooknum,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
......@@ -559,8 +564,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
dn = dccp_pernet(net);
nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
......@@ -702,8 +706,60 @@ static int dccp_nlattr_size(void)
return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
struct dccp_net *dn = dccp_pernet(&init_net);
unsigned int *timeouts = data;
int i;
/* set default DCCP timeouts. */
for (i=0; i<CT_DCCP_MAX; i++)
timeouts[i] = dn->dccp_timeout[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
if (tb[i]) {
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
return 0;
}
static int
dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
int i;
for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
[CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
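dccp_timeout_nlattr_to_obj() above leans on the 1:1 correspondence between the CTA_TIMEOUT_DCCP_* attribute numbers and the CT_DCCP_* state numbers: copy the per-netns defaults first, then overwrite each slot whose attribute was supplied, converting seconds to jiffies. A userspace sketch of that merge; the enum, HZ and values are placeholders, only the indexing pattern matters:

#include <stdio.h>

#define HZ 100                                   /* placeholder tick rate */

enum { ST_UNSPEC, ST_REQUEST, ST_RESPOND, ST_PARTOPEN, ST_MAX };

int main(void)
{
        unsigned int defaults[ST_MAX] = { 0, 2 * 60 * HZ, 4 * 60 * HZ, 8 * 60 * HZ };
        unsigned int provided[ST_MAX] = { 0, 0, 30, 0 };   /* seconds; 0 = attribute absent */
        unsigned int timeouts[ST_MAX];
        int i;

        for (i = 0; i < ST_MAX; i++)
                timeouts[i] = defaults[i];                 /* defaults first */
        for (i = ST_UNSPEC + 1; i < ST_MAX; i++)
                if (provided[i])
                        timeouts[i] = provided[i] * HZ;    /* then user overrides */

        for (i = ST_UNSPEC + 1; i < ST_MAX; i++)
                printf("state %d: %u ticks\n", i, timeouts[i]);
        return 0;
}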
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
static struct ctl_table dccp_sysctl_table[] = {
......@@ -767,6 +823,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
.get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
......@@ -779,6 +836,15 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_DCCP_MAX,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
......@@ -789,6 +855,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
.get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
......@@ -801,6 +868,15 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_DCCP_MAX,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static __net_init int dccp_net_init(struct net *net)
......
......@@ -40,25 +40,70 @@ static int generic_print_tuple(struct seq_file *s,
return 0;
}
static unsigned int *generic_get_timeouts(struct net *net)
{
return &nf_ct_generic_timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
static int packet(struct nf_conn *ct,
static int generic_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeout)
{
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout);
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol is found. */
static bool new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
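Every ->new() and ->packet() handler now takes a timeouts array, and every tracker gains a ->get_timeouts() callback, because the conntrack core resolves one timeout table per packet and passes the same array down. A simplified sketch of that caller-side lookup, assuming the nf_conn_timeout extension and the ctnl_timeout object that appear later in this section (the name and exact shape of the real helper may differ):
/* Caller-side sketch; illustrative only. */
static unsigned int *example_timeout_lookup(struct net *net, struct nf_conn *ct,
					    struct nf_conntrack_l4proto *l4proto)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
	struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
	/* a policy attached to the conntrack (e.g. via the CT target below)
	 * overrides the per-protocol defaults */
	if (timeout_ext)
		return (unsigned int *)timeout_ext->timeout->data;
#endif
	return l4proto->get_timeouts(net);
}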
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeout = data;
if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
else {
/* Set default generic timeout. */
*timeout = nf_ct_generic_timeout;
}
return 0;
}
static int
generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
[CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *generic_sysctl_header;
static struct ctl_table generic_sysctl_table[] = {
......@@ -93,8 +138,18 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
.pkt_to_tuple = generic_pkt_to_tuple,
.invert_tuple = generic_invert_tuple,
.print_tuple = generic_print_tuple,
.packet = packet,
.new = new,
.packet = generic_packet,
.get_timeouts = generic_get_timeouts,
.new = generic_new,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
.obj_to_nlattr = generic_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_GENERIC_MAX,
.obj_size = sizeof(unsigned int),
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &generic_sysctl_header,
.ctl_table = generic_sysctl_table,
......
......@@ -41,8 +41,16 @@
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#define GRE_TIMEOUT (30 * HZ)
#define GRE_STREAM_TIMEOUT (180 * HZ)
enum grep_conntrack {
GRE_CT_UNREPLIED,
GRE_CT_REPLIED,
GRE_CT_MAX
};
static unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_UNREPLIED] = 30*HZ,
[GRE_CT_REPLIED] = 180*HZ,
};
static int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
......@@ -227,13 +235,19 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
(ct->proto.gre.stream_timeout / HZ));
}
static unsigned int *gre_get_timeouts(struct net *net)
{
return gre_timeouts;
}
/* Returns verdict for packet, and may modify conntrack */
static int gre_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is a GRE connection.
* Extend timeout. */
......@@ -252,15 +266,15 @@ static int gre_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol is found. */
static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
pr_debug(": ");
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
/* initialize to sane value. Ideally a conntrack helper
* (e.g. in case of pptp) is increasing them */
ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
ct->proto.gre.timeout = GRE_TIMEOUT;
ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
return true;
}
......@@ -278,6 +292,52 @@ static void gre_destroy(struct nf_conn *ct)
nf_ct_gre_keymap_destroy(master);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeouts = data;
/* set default timeouts for GRE. */
timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
timeouts[GRE_CT_UNREPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
}
if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
timeouts[GRE_CT_REPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
}
return 0;
}
static int
gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
htonl(timeouts[GRE_CT_REPLIED] / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
[CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
/* protocol helper struct */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.l3proto = AF_INET,
......@@ -287,6 +347,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.invert_tuple = gre_invert_tuple,
.print_tuple = gre_print_tuple,
.print_conntrack = gre_print_conntrack,
.get_timeouts = gre_get_timeouts,
.packet = gre_packet,
.new = gre_new,
.destroy = gre_destroy,
......@@ -297,6 +358,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = gre_timeout_nlattr_to_obj,
.obj_to_nlattr = gre_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_GRE_MAX,
.obj_size = sizeof(unsigned int) * GRE_CT_MAX,
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static int proto_gre_net_init(struct net *net)
......
......@@ -279,13 +279,19 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
return sctp_conntracks[dir][i][cur_state];
}
static unsigned int *sctp_get_timeouts(struct net *net)
{
return sctp_timeouts;
}
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
static int sctp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeouts)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
......@@ -370,7 +376,7 @@ static int sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]);
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
dir == IP_CT_DIR_REPLY &&
......@@ -390,7 +396,7 @@ static int sctp_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol is found. */
static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
enum sctp_conntrack new_state;
const struct sctphdr *sh;
......@@ -543,6 +549,57 @@ static int sctp_nlattr_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeouts = data;
int i;
/* set default SCTP timeouts. */
for (i=0; i<SCTP_CONNTRACK_MAX; i++)
timeouts[i] = sctp_timeouts[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
if (tb[i]) {
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
return 0;
}
static int
sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
int i;
for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
[CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static unsigned int sctp_sysctl_table_users;
static struct ctl_table_header *sctp_sysctl_header;
......@@ -664,6 +721,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.print_tuple = sctp_print_tuple,
.print_conntrack = sctp_print_conntrack,
.packet = sctp_packet,
.get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -675,6 +733,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_SCTP_MAX,
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &sctp_sysctl_table_users,
.ctl_table_header = &sctp_sysctl_header,
......@@ -694,6 +761,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.print_tuple = sctp_print_tuple,
.print_conntrack = sctp_print_conntrack,
.packet = sctp_packet,
.get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -704,6 +772,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_SCTP_MAX,
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
#ifdef CONFIG_SYSCTL
.ctl_table_users = &sctp_sysctl_table_users,
......
......@@ -64,13 +64,7 @@ static const char *const tcp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
/* RFC1122 says the R2 limit should be at least 100 seconds.
Linux uses 15 packets as limit, which corresponds
to ~13-30min depending on RTO. */
static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;
static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
[TCP_CONNTRACK_SYN_SENT] = 2 MINS,
[TCP_CONNTRACK_SYN_RECV] = 60 SECS,
[TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
......@@ -80,6 +74,11 @@ static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
[TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE] = 10 SECS,
[TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
/* RFC1122 says the R2 limit should be at least 100 seconds.
Linux uses 15 packets as limit, which corresponds
to ~13-30min depending on RTO. */
[TCP_CONNTRACK_RETRANS] = 5 MINS,
[TCP_CONNTRACK_UNACK] = 5 MINS,
};
#define sNO TCP_CONNTRACK_NONE
......@@ -814,13 +813,19 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
static unsigned int *tcp_get_timeouts(struct net *net)
{
return tcp_timeouts;
}
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple *tuple;
......@@ -1015,14 +1020,14 @@ static int tcp_packet(struct nf_conn *ct,
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans)
timeout = nf_ct_tcp_timeout_max_retrans;
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged)
timeout = nf_ct_tcp_timeout_unacknowledged;
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
else
timeout = tcp_timeouts[new_state];
timeout = timeouts[new_state];
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
......@@ -1054,7 +1059,7 @@ static int tcp_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol is found. */
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
enum tcp_conntrack new_state;
const struct tcphdr *th;
......@@ -1239,6 +1244,113 @@ static int tcp_nlattr_tuple_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeouts = data;
int i;
/* set default TCP timeouts. */
for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
timeouts[i] = tcp_timeouts[i];
if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
timeouts[TCP_CONNTRACK_ESTABLISHED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
timeouts[TCP_CONNTRACK_FIN_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
timeouts[TCP_CONNTRACK_LAST_ACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
timeouts[TCP_CONNTRACK_TIME_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
timeouts[TCP_CONNTRACK_CLOSE] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
timeouts[TCP_CONNTRACK_SYN_SENT2] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
timeouts[TCP_CONNTRACK_RETRANS] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_UNACK]) {
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
return 0;
}
static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
[CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static unsigned int tcp_sysctl_table_users;
static struct ctl_table_header *tcp_sysctl_header;
......@@ -1301,14 +1413,14 @@ static struct ctl_table tcp_sysctl_table[] = {
},
{
.procname = "nf_conntrack_tcp_timeout_max_retrans",
.data = &nf_ct_tcp_timeout_max_retrans,
.data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_unacknowledged",
.data = &nf_ct_tcp_timeout_unacknowledged,
.data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
......@@ -1404,7 +1516,7 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
},
{
.procname = "ip_conntrack_tcp_timeout_max_retrans",
.data = &nf_ct_tcp_timeout_max_retrans,
.data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
......@@ -1445,6 +1557,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.print_tuple = tcp_print_tuple,
.print_conntrack = tcp_print_conntrack,
.packet = tcp_packet,
.get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -1456,6 +1569,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_TCP_MAX,
.obj_size = sizeof(unsigned int) *
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &tcp_sysctl_table_users,
.ctl_table_header = &tcp_sysctl_header,
......@@ -1477,6 +1600,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.print_tuple = tcp_print_tuple,
.print_conntrack = tcp_print_conntrack,
.packet = tcp_packet,
.get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -1488,6 +1612,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_TCP_MAX,
.obj_size = sizeof(unsigned int) *
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &tcp_sysctl_table_users,
.ctl_table_header = &tcp_sysctl_header,
......
......@@ -25,8 +25,16 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
enum udp_conntrack {
UDP_CT_UNREPLIED,
UDP_CT_REPLIED,
UDP_CT_MAX
};
static unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_UNREPLIED] = 30*HZ,
[UDP_CT_REPLIED] = 180*HZ,
};
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
......@@ -63,30 +71,38 @@ static int udp_print_tuple(struct seq_file *s,
ntohs(tuple->dst.u.udp.port));
}
static unsigned int *udp_get_timeouts(struct net *net)
{
return udp_timeouts;
}
/* Returns verdict for packet, and may modify conntrack type */
static int udp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDP_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
} else {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
/* Called when a new connection for this protocol is found. */
static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
......@@ -136,20 +152,66 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return NF_ACCEPT;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeouts = data;
/* set default timeouts for UDP. */
timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
timeouts[UDP_CT_UNREPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
}
if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
timeouts[UDP_CT_REPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
}
return 0;
}
static int
udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
htonl(timeouts[UDP_CT_REPLIED] / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
[CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static unsigned int udp_sysctl_table_users;
static struct ctl_table_header *udp_sysctl_header;
static struct ctl_table udp_sysctl_table[] = {
{
.procname = "nf_conntrack_udp_timeout",
.data = &nf_ct_udp_timeout,
.data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udp_timeout_stream",
.data = &nf_ct_udp_timeout_stream,
.data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
......@@ -160,14 +222,14 @@ static struct ctl_table udp_sysctl_table[] = {
static struct ctl_table udp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_udp_timeout",
.data = &nf_ct_udp_timeout,
.data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_udp_timeout_stream",
.data = &nf_ct_udp_timeout_stream,
.data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
......@@ -186,6 +248,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.invert_tuple = udp_invert_tuple,
.print_tuple = udp_print_tuple,
.packet = udp_packet,
.get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -194,6 +257,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDP_MAX,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udp_sysctl_table_users,
.ctl_table_header = &udp_sysctl_header,
......@@ -214,6 +286,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.invert_tuple = udp_invert_tuple,
.print_tuple = udp_print_tuple,
.packet = udp_packet,
.get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -222,6 +295,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDP_MAX,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udp_sysctl_table_users,
.ctl_table_header = &udp_sysctl_header,
......
......@@ -24,8 +24,16 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_log.h>
static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ;
static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ;
enum udplite_conntrack {
UDPLITE_CT_UNREPLIED,
UDPLITE_CT_REPLIED,
UDPLITE_CT_MAX
};
static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
[UDPLITE_CT_UNREPLIED] = 30*HZ,
[UDPLITE_CT_REPLIED] = 180*HZ,
};
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
......@@ -60,31 +68,38 @@ static int udplite_print_tuple(struct seq_file *s,
ntohs(tuple->dst.u.udp.port));
}
static unsigned int *udplite_get_timeouts(struct net *net)
{
return udplite_timeouts;
}
/* Returns verdict for packet, and may modify conntrack type */
static int udplite_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
unsigned int hooknum,
unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
nf_ct_udplite_timeout_stream);
timeouts[UDPLITE_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
} else {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDPLITE_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
/* Called when a new connection for this protocol is found. */
static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
......@@ -141,20 +156,66 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
{
unsigned int *timeouts = data;
/* set default timeouts for UDPlite. */
timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
timeouts[UDPLITE_CT_UNREPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
}
if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
timeouts[UDPLITE_CT_REPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
}
return 0;
}
static int
udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
[CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static unsigned int udplite_sysctl_table_users;
static struct ctl_table_header *udplite_sysctl_header;
static struct ctl_table udplite_sysctl_table[] = {
{
.procname = "nf_conntrack_udplite_timeout",
.data = &nf_ct_udplite_timeout,
.data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udplite_timeout_stream",
.data = &nf_ct_udplite_timeout_stream,
.data = &udplite_timeouts[UDPLITE_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
......@@ -172,6 +233,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
.get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -180,6 +242,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = udplite_timeout_nlattr_to_obj,
.obj_to_nlattr = udplite_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
.obj_size = sizeof(unsigned int) *
CTA_TIMEOUT_UDPLITE_MAX,
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udplite_sysctl_table_users,
.ctl_table_header = &udplite_sysctl_header,
......@@ -196,6 +268,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
.get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
......@@ -204,6 +277,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = udplite_timeout_nlattr_to_obj,
.obj_to_nlattr = udplite_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
.obj_size = sizeof(unsigned int) *
CTA_TIMEOUT_UDPLITE_MAX,
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udplite_sysctl_table_users,
.ctl_table_header = &udplite_sysctl_header,
......
/*
* (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
* (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation (or any later at your option).
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_timeout.h>
struct ctnl_timeout *
(*nf_ct_timeout_find_get_hook)(const char *name) __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook);
void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook);
static struct nf_ct_ext_type timeout_extend __read_mostly = {
.len = sizeof(struct nf_conn_timeout),
.align = __alignof__(struct nf_conn_timeout),
.id = NF_CT_EXT_TIMEOUT,
};
int nf_conntrack_timeout_init(struct net *net)
{
int ret = 0;
if (net_eq(net, &init_net)) {
ret = nf_ct_extend_register(&timeout_extend);
if (ret < 0) {
printk(KERN_ERR "nf_ct_timeout: Unable to register "
"timeout extension.\n");
return ret;
}
}
return 0;
}
void nf_conntrack_timeout_fini(struct net *net)
{
if (net_eq(net, &init_net))
nf_ct_extend_unregister(&timeout_extend);
}
/*
* (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
* (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation (or any later at your option).
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tuning");
static LIST_HEAD(cttimeout_list);
static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
[CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
[CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
[CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
[CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
};
static int
ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
struct nf_conntrack_l4proto *l4proto,
const struct nlattr *attr)
{
int ret = 0;
if (likely(l4proto->ctnl_timeout.nlattr_to_obj)) {
struct nlattr *tb[l4proto->ctnl_timeout.nlattr_max+1];
nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
attr, l4proto->ctnl_timeout.nla_policy);
ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
}
return ret;
}
static int
cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
__u16 l3num;
__u8 l4num;
struct nf_conntrack_l4proto *l4proto;
struct ctnl_timeout *timeout, *matching = NULL;
char *name;
int ret;
if (!cda[CTA_TIMEOUT_NAME] ||
!cda[CTA_TIMEOUT_L3PROTO] ||
!cda[CTA_TIMEOUT_L4PROTO] ||
!cda[CTA_TIMEOUT_DATA])
return -EINVAL;
name = nla_data(cda[CTA_TIMEOUT_NAME]);
l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
list_for_each_entry(timeout, &cttimeout_list, head) {
if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
matching = timeout;
break;
}
l4proto = __nf_ct_l4proto_find(l3num, l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num)
return -EOPNOTSUPP;
if (matching) {
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
/* You cannot replace one timeout policy by another of
* different kind, sorry.
*/
if (matching->l3num != l3num ||
matching->l4num != l4num)
return -EINVAL;
ret = ctnl_timeout_parse_policy(matching, l4proto,
cda[CTA_TIMEOUT_DATA]);
return ret;
}
return -EBUSY;
}
timeout = kzalloc(sizeof(struct ctnl_timeout) +
l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
if (timeout == NULL)
return -ENOMEM;
ret = ctnl_timeout_parse_policy(timeout, l4proto,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
timeout->l3num = l3num;
timeout->l4num = l4num;
atomic_set(&timeout->refcnt, 1);
list_add_tail_rcu(&timeout->head, &cttimeout_list);
return 0;
err:
kfree(timeout);
return ret;
}
static int
ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
int event, struct ctnl_timeout *timeout)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
unsigned int flags = pid ? NLM_F_MULTI : 0;
struct nf_conntrack_l4proto *l4proto;
event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
if (nlh == NULL)
goto nlmsg_failure;
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = AF_UNSPEC;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4num);
NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
htonl(atomic_read(&timeout->refcnt)));
l4proto = __nf_ct_l4proto_find(timeout->l3num, timeout->l4num);
/* If the timeout object does not match the layer 4 protocol tracker,
* then skip dumping the data part since we don't know how to
* interpret it. This may happen for UDPlite, SCTP and DCCP since
* you can unload the module.
*/
if (timeout->l4num != l4proto->l4proto)
goto out;
if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
struct nlattr *nest_parms;
int ret;
nest_parms = nla_nest_start(skb,
CTA_TIMEOUT_DATA | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
if (ret < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
}
out:
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctnl_timeout *cur, *last;
if (cb->args[2])
return 0;
last = (struct ctnl_timeout *)cb->args[1];
if (cb->args[1])
cb->args[1] = 0;
rcu_read_lock();
list_for_each_entry_rcu(cur, &cttimeout_list, head) {
if (last && cur != last)
continue;
if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
cb->args[1] = (unsigned long)cur;
break;
}
}
if (!cb->args[1])
cb->args[2] = 1;
rcu_read_unlock();
return skb->len;
}
static int
cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
int ret = -ENOENT;
char *name;
struct ctnl_timeout *cur;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnl_timeout_dump,
};
return netlink_dump_start(ctnl, skb, nlh, &c);
}
if (!cda[CTA_TIMEOUT_NAME])
return -EINVAL;
name = nla_data(cda[CTA_TIMEOUT_NAME]);
list_for_each_entry(cur, &cttimeout_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
ret = -ENOMEM;
break;
}
ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_NEW, cur);
if (ret <= 0) {
kfree_skb(skb2);
break;
}
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
MSG_DONTWAIT);
if (ret > 0)
ret = 0;
/* this avoids a loop in nfnetlink. */
return ret == -EAGAIN ? -ENOBUFS : ret;
}
return ret;
}
/* try to delete object, fail if it is still in use. */
static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
{
int ret = 0;
/* we want to avoid races with nf_ct_timeout_find_get. */
if (atomic_dec_and_test(&timeout->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&timeout->head);
kfree_rcu(timeout, rcu_head);
} else {
/* still in use, restore reference counter. */
atomic_inc(&timeout->refcnt);
ret = -EBUSY;
}
return ret;
}
static int
cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
char *name;
struct ctnl_timeout *cur;
int ret = -ENOENT;
if (!cda[CTA_TIMEOUT_NAME]) {
list_for_each_entry(cur, &cttimeout_list, head)
ctnl_timeout_try_del(cur);
return 0;
}
name = nla_data(cda[CTA_TIMEOUT_NAME]);
list_for_each_entry(cur, &cttimeout_list, head) {
if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
ret = ctnl_timeout_try_del(cur);
if (ret < 0)
return ret;
break;
}
return ret;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
{
struct ctnl_timeout *timeout, *matching = NULL;
rcu_read_lock();
list_for_each_entry_rcu(timeout, &cttimeout_list, head) {
if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
if (!try_module_get(THIS_MODULE))
goto err;
if (!atomic_inc_not_zero(&timeout->refcnt)) {
module_put(THIS_MODULE);
goto err;
}
matching = timeout;
break;
}
err:
rcu_read_unlock();
return matching;
}
static void ctnl_timeout_put(struct ctnl_timeout *timeout)
{
atomic_dec(&timeout->refcnt);
module_put(THIS_MODULE);
}
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
[IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy },
[IPCTNL_MSG_TIMEOUT_GET] = { .call = cttimeout_get_timeout,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy },
[IPCTNL_MSG_TIMEOUT_DELETE] = { .call = cttimeout_del_timeout,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy },
};
static const struct nfnetlink_subsystem cttimeout_subsys = {
.name = "conntrack_timeout",
.subsys_id = NFNL_SUBSYS_CTNETLINK_TIMEOUT,
.cb_count = IPCTNL_MSG_TIMEOUT_MAX,
.cb = cttimeout_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_TIMEOUT);
static int __init cttimeout_init(void)
{
int ret;
ret = nfnetlink_subsys_register(&cttimeout_subsys);
if (ret < 0) {
pr_err("cttimeout_init: cannot register cttimeout with "
"nfnetlink.\n");
goto err_out;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put);
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
return 0;
err_out:
return ret;
}
static void __exit cttimeout_exit(void)
{
struct ctnl_timeout *cur, *tmp;
pr_info("cttimeout: unregistering from nfnetlink.\n");
nfnetlink_subsys_unregister(&cttimeout_subsys);
list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
list_del_rcu(&cur->head);
/* We are sure that our objects have no clients at this point,
* it's safe to release them all without checking refcnt.
*/
kfree_rcu(cur, rcu_head);
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
}
module_init(cttimeout_init);
module_exit(cttimeout_exit);
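For context, creating a named policy through this subsystem is an ordinary nfnetlink transaction. A rough userspace sketch, assuming libmnl and the nfnetlink_cttimeout header added by this series ("udp-short" is an arbitrary example name, error paths and the ACK read are omitted, and the payloads are big-endian seconds, matching the parsing above):
/* Illustrative userspace sketch, not part of this commit. */
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int add_udp_timeout_policy(const char *name)	/* e.g. "udp-short" */
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct mnl_socket *nl;
	struct nfgenmsg *nfg;
	struct nlattr *nest;
	int ret;
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) |
			  IPCTNL_MSG_TIMEOUT_NEW;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;
	mnl_attr_put_strz(nlh, CTA_TIMEOUT_NAME, name);
	mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
	mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_UDP);
	/* nested per-protocol data: seconds, big-endian, as parsed above */
	nest = mnl_attr_nest_start(nlh, CTA_TIMEOUT_DATA);
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_UDP_UNREPLIED, htonl(20));
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_UDP_REPLIED, htonl(120));
	mnl_attr_nest_end(nlh, nest);
	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (nl == NULL)
		return -1;
	if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		mnl_socket_close(nl);
		return -1;
	}
	ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	mnl_socket_close(nl);
	return ret < 0 ? -1 : 0;
}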
......@@ -16,9 +16,10 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
static unsigned int xt_ct_target(struct sk_buff *skb,
static unsigned int xt_ct_target_v0(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct xt_ct_target_info *info = par->targinfo;
......@@ -35,6 +36,23 @@ static unsigned int xt_ct_target(struct sk_buff *skb,
return XT_CONTINUE;
}
static unsigned int xt_ct_target_v1(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct xt_ct_target_info_v1 *info = par->targinfo;
struct nf_conn *ct = info->ct;
/* Previously seen (loopback)? Ignore. */
if (skb->nfct != NULL)
return XT_CONTINUE;
atomic_inc(&ct->ct_general.use);
skb->nfct = &ct->ct_general;
skb->nfctinfo = IP_CT_NEW;
return XT_CONTINUE;
}
static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
{
if (par->family == NFPROTO_IPV4) {
......@@ -53,7 +71,7 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
return 0;
}
static int xt_ct_tg_check(const struct xt_tgchk_param *par)
static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conntrack_tuple t;
......@@ -130,7 +148,137 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
return ret;
}
static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info_v1 *info = par->targinfo;
struct nf_conntrack_tuple t;
struct nf_conn_help *help;
struct nf_conn *ct;
int ret = 0;
u8 proto;
if (info->flags & ~XT_CT_NOTRACK)
return -EINVAL;
if (info->flags & XT_CT_NOTRACK) {
ct = nf_ct_untracked_get();
atomic_inc(&ct->ct_general.use);
goto out;
}
#ifndef CONFIG_NF_CONNTRACK_ZONES
if (info->zone)
goto err1;
#endif
ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0)
goto err1;
memset(&t, 0, sizeof(t));
ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
ret = PTR_ERR(ct);
if (IS_ERR(ct))
goto err2;
ret = 0;
if ((info->ct_events || info->exp_events) &&
!nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
GFP_KERNEL))
goto err3;
if (info->helper[0]) {
ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto) {
pr_info("You must specify a L4 protocol, "
"and not use inversions on it.\n");
goto err3;
}
ret = -ENOMEM;
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
if (help == NULL)
goto err3;
ret = -ENOENT;
help->helper = nf_conntrack_helper_try_module_get(info->helper,
par->family,
proto);
if (help->helper == NULL) {
pr_info("No such helper \"%s\"\n", info->helper);
goto err3;
}
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
if (info->timeout) {
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct ctnl_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
timeout_find_get =
rcu_dereference(nf_ct_timeout_find_get_hook);
if (timeout_find_get) {
const struct ipt_entry *e = par->entryinfo;
if (e->ip.invflags & IPT_INV_PROTO) {
ret = -EINVAL;
pr_info("You cannot use inversion on "
"L4 protocol\n");
goto err3;
}
timeout = timeout_find_get(info->timeout);
if (timeout == NULL) {
ret = -ENOENT;
pr_info("No such timeout policy \"%s\"\n",
info->timeout);
goto err3;
}
if (timeout->l3num != par->family) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be "
"used by L3 protocol number %d\n",
info->timeout, timeout->l3num);
goto err3;
}
if (timeout->l4num != e->ip.proto) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be "
"used by L4 protocol number %d\n",
info->timeout, timeout->l4num);
goto err3;
}
timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
GFP_KERNEL);
if (timeout_ext == NULL) {
ret = -ENOMEM;
goto err3;
}
} else {
ret = -ENOENT;
pr_info("Timeout policy base is empty\n");
goto err3;
}
}
#endif
__set_bit(IPS_TEMPLATE_BIT, &ct->status);
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
out:
info->ct = ct;
return 0;
err3:
nf_conntrack_free(ct);
err2:
nf_ct_l3proto_module_put(par->family);
err1:
return ret;
}
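With a userspace CT target revision that exposes the new timeout field (assumed here; in iptables it is typically spelled --timeout), a policy created through the cttimeout interface above can then be attached from the raw table, for example: iptables -t raw -A PREROUTING -p udp -j CT --timeout udp-short. The check routine above is what resolves that name through nf_ct_timeout_find_get_hook and pins the resulting ctnl_timeout to the conntrack template with nf_ct_timeout_ext_add().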
static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
......@@ -146,25 +294,67 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
nf_ct_put(info->ct);
}
static struct xt_target xt_ct_tg __read_mostly = {
static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
{
struct xt_ct_target_info_v1 *info = par->targinfo;
struct nf_conn *ct = info->ct;
struct nf_conn_help *help;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
typeof(nf_ct_timeout_put_hook) timeout_put;
#endif
if (!nf_ct_is_untracked(ct)) {
help = nfct_help(ct);
if (help)
module_put(help->helper->me);
nf_ct_l3proto_module_put(par->family);
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
if (timeout_put) {
timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext)
timeout_put(timeout_ext->timeout);
}
#endif
}
nf_ct_put(info->ct);
}
static struct xt_target xt_ct_tg_reg[] __read_mostly = {
{
.name = "CT",
.family = NFPROTO_UNSPEC,
.targetsize = sizeof(struct xt_ct_target_info),
.checkentry = xt_ct_tg_check,
.destroy = xt_ct_tg_destroy,
.target = xt_ct_target,
.checkentry = xt_ct_tg_check_v0,
.destroy = xt_ct_tg_destroy_v0,
.target = xt_ct_target_v0,
.table = "raw",
.me = THIS_MODULE,
},
{
.name = "CT",
.family = NFPROTO_UNSPEC,
.revision = 1,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.checkentry = xt_ct_tg_check_v1,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
.table = "raw",
.me = THIS_MODULE,
},
};
static int __init xt_ct_tg_init(void)
{
return xt_register_target(&xt_ct_tg);
return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
static void __exit xt_ct_tg_exit(void)
{
xt_unregister_target(&xt_ct_tg);
xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
module_init(xt_ct_tg_init);
......
......@@ -9,12 +9,14 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/tcp.h>
......@@ -22,16 +24,129 @@
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_LOG.h>
#include <linux/netfilter/xt_LOG.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/xt_log.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: IPv4 packet logging to syslog");
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset)
{
struct udphdr _udph;
const struct udphdr *uh;
if (proto == IPPROTO_UDP)
/* Max length: 10 "PROTO=UDP " */
sb_add(m, "PROTO=UDP ");
else /* Max length: 14 "PROTO=UDPLITE " */
sb_add(m, "PROTO=UDPLITE ");
if (fragment)
goto out;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
ntohs(uh->len));
out:
return 0;
}
static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags)
{
struct tcphdr _tcph;
const struct tcphdr *th;
/* Max length: 10 "PROTO=TCP " */
sb_add(m, "PROTO=TCP ");
if (fragment)
return 0;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
if (logflags & XT_LOG_TCPSEQ)
sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
/* Max length: 13 "WINDOW=65535 " */
sb_add(m, "WINDOW=%u ", ntohs(th->window));
/* Max length: 9 "RES=0x3C " */
sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
TCP_RESERVED_BITS) >> 22));
/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
if (th->cwr)
sb_add(m, "CWR ");
if (th->ece)
sb_add(m, "ECE ");
if (th->urg)
sb_add(m, "URG ");
if (th->ack)
sb_add(m, "ACK ");
if (th->psh)
sb_add(m, "PSH ");
if (th->rst)
sb_add(m, "RST ");
if (th->syn)
sb_add(m, "SYN ");
if (th->fin)
sb_add(m, "FIN ");
/* Max length: 11 "URGP=65535 " */
sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
u_int8_t _opt[60 - sizeof(struct tcphdr)];
const u_int8_t *op;
unsigned int i;
unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
optsize, _opt);
if (op == NULL) {
sb_add(m, "OPT (TRUNCATED)");
return 1;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
sb_add(m, "OPT (");
for (i = 0; i < optsize; i++)
sb_add(m, "%02X", op[i]);
sb_add(m, ") ");
}
return 0;
}
/* One level of recursion won't kill us */
static void dump_packet(struct sbuff *m,
static void dump_ipv4_packet(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb,
unsigned int iphoff)
......@@ -74,7 +189,7 @@ static void dump_packet(struct sbuff *m,
if (ntohs(ih->frag_off) & IP_OFFSET)
sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
if ((logflags & IPT_LOG_IPOPT) &&
if ((logflags & XT_LOG_IPOPT) &&
ih->ihl * 4 > sizeof(struct iphdr)) {
const unsigned char *op;
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
......@@ -96,108 +211,19 @@ static void dump_packet(struct sbuff *m,
}
switch (ih->protocol) {
case IPPROTO_TCP: {
struct tcphdr _tcph;
const struct tcphdr *th;
/* Max length: 10 "PROTO=TCP " */
sb_add(m, "PROTO=TCP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
th = skb_header_pointer(skb, iphoff + ih->ihl * 4,
sizeof(_tcph), &_tcph);
if (th == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u ",
ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
if (logflags & IPT_LOG_TCPSEQ)
sb_add(m, "SEQ=%u ACK=%u ",
ntohl(th->seq), ntohl(th->ack_seq));
/* Max length: 13 "WINDOW=65535 " */
sb_add(m, "WINDOW=%u ", ntohs(th->window));
/* Max length: 9 "RES=0x3F " */
sb_add(m, "RES=0x%02x ", (u8)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
if (th->cwr)
sb_add(m, "CWR ");
if (th->ece)
sb_add(m, "ECE ");
if (th->urg)
sb_add(m, "URG ");
if (th->ack)
sb_add(m, "ACK ");
if (th->psh)
sb_add(m, "PSH ");
if (th->rst)
sb_add(m, "RST ");
if (th->syn)
sb_add(m, "SYN ");
if (th->fin)
sb_add(m, "FIN ");
/* Max length: 11 "URGP=65535 " */
sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
if ((logflags & IPT_LOG_TCPOPT) &&
th->doff * 4 > sizeof(struct tcphdr)) {
unsigned char _opt[4 * 15 - sizeof(struct tcphdr)];
const unsigned char *op;
unsigned int i, optsize;
optsize = th->doff * 4 - sizeof(struct tcphdr);
op = skb_header_pointer(skb,
iphoff+ih->ihl*4+sizeof(_tcph),
optsize, _opt);
if (op == NULL) {
sb_add(m, "TRUNCATED");
case IPPROTO_TCP:
if (dump_tcp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff+ih->ihl*4, logflags))
return;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
sb_add(m, "OPT (");
for (i = 0; i < optsize; i++)
sb_add(m, "%02X", op[i]);
sb_add(m, ") ");
}
break;
}
case IPPROTO_UDP:
case IPPROTO_UDPLITE: {
struct udphdr _udph;
const struct udphdr *uh;
if (ih->protocol == IPPROTO_UDP)
/* Max length: 10 "PROTO=UDP " */
sb_add(m, "PROTO=UDP " );
else /* Max length: 14 "PROTO=UDPLITE " */
sb_add(m, "PROTO=UDPLITE ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
uh = skb_header_pointer(skb, iphoff+ih->ihl*4,
sizeof(_udph), &_udph);
if (uh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u LEN=%u ",
ntohs(uh->source), ntohs(uh->dest),
ntohs(uh->len));
case IPPROTO_UDPLITE:
if (dump_udp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff+ih->ihl*4))
return;
break;
}
case IPPROTO_ICMP: {
struct icmphdr _icmph;
const struct icmphdr *ich;
......@@ -270,7 +296,7 @@ static void dump_packet(struct sbuff *m,
/* Max length: 3+maxlen */
if (!iphoff) { /* Only recurse once. */
sb_add(m, "[");
dump_packet(m, info, skb,
dump_ipv4_packet(m, info, skb,
iphoff + ih->ihl*4+sizeof(_icmph));
sb_add(m, "] ");
}
......@@ -335,7 +361,7 @@ static void dump_packet(struct sbuff *m,
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
sb_add(m, "UID=%u GID=%u ",
......@@ -363,7 +389,7 @@ static void dump_packet(struct sbuff *m,
/* maxlen = 230+ 91 + 230 + 252 = 803 */
}
static void dump_mac_header(struct sbuff *m,
static void dump_ipv4_mac_header(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
......@@ -373,7 +399,7 @@ static void dump_mac_header(struct sbuff *m,
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & IPT_LOG_MACDECODE))
if (!(logflags & XT_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
......@@ -400,18 +426,9 @@ static void dump_mac_header(struct sbuff *m,
sb_add(m, " ");
}
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
static void
ipt_log_packet(u_int8_t pf,
log_packet_common(struct sbuff *m,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
......@@ -419,11 +436,6 @@ ipt_log_packet(u_int8_t pf,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct sbuff *m = sb_open();
if (!loginfo)
loginfo = &default_loginfo;
sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
prefix,
in ? in->name : "",
......@@ -441,52 +453,427 @@ ipt_log_packet(u_int8_t pf,
sb_add(m, "PHYSOUT=%s ", physoutdev->name);
}
#endif
}
static void
ipt_log_packet(u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct sbuff *m = sb_open();
if (!loginfo)
loginfo = &default_loginfo;
log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
if (in != NULL)
dump_ipv4_mac_header(m, loginfo, skb);
dump_ipv4_packet(m, loginfo, skb, 0);
sb_close(m);
}
#if IS_ENABLED(CONFIG_IPV6)
/* One level of recursion won't kill us */
static void dump_ipv6_packet(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff,
int recurse)
{
u_int8_t currenthdr;
int fragment;
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int logflags;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_MASK;
ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
if (ih == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
(ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
ih->hop_limit,
(ntohl(*(__be32 *)ih) & 0x000fffff));
fragment = 0;
ptr = ip6hoff + sizeof(struct ipv6hdr);
currenthdr = ih->nexthdr;
while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
struct ipv6_opt_hdr _hdr;
const struct ipv6_opt_hdr *hp;
hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
if (hp == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 48 "OPT (...) " */
if (logflags & XT_LOG_IPOPT)
sb_add(m, "OPT ( ");
switch (currenthdr) {
case IPPROTO_FRAGMENT: {
struct frag_hdr _fhdr;
const struct frag_hdr *fh;
sb_add(m, "FRAG:");
fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
&_fhdr);
if (fh == NULL) {
sb_add(m, "TRUNCATED ");
return;
}
/* Max length: 6 "65535 " */
sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
if (fh->frag_off & htons(0x0001))
sb_add(m, "INCOMPLETE ");
sb_add(m, "ID:%08x ", ntohl(fh->identification));
if (ntohs(fh->frag_off) & 0xFFF8)
fragment = 1;
hdrlen = 8;
break;
}
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
if (fragment) {
if (logflags & XT_LOG_IPOPT)
sb_add(m, ")");
return;
}
hdrlen = ipv6_optlen(hp);
break;
/* Max Length */
case IPPROTO_AH:
if (logflags & XT_LOG_IPOPT) {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
/* Max length: 3 "AH " */
sb_add(m, "AH ");
if (fragment) {
sb_add(m, ")");
return;
}
ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
&_ahdr);
if (ah == NULL) {
/*
* Max length: 26 "INCOMPLETE [65535
* bytes] )"
*/
sb_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 15 "SPI=0xF1234567 */
sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
}
hdrlen = (hp->hdrlen+2)<<2;
break;
case IPPROTO_ESP:
if (logflags & XT_LOG_IPOPT) {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 4 "ESP " */
sb_add(m, "ESP ");
if (fragment) {
sb_add(m, ")");
return;
}
/*
* Max length: 26 "INCOMPLETE [65535 bytes] )"
*/
eh = skb_header_pointer(skb, ptr, sizeof(_esph),
&_esph);
if (eh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 16 "SPI=0xF1234567 )" */
sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
}
return;
default:
/* Max length: 20 "Unknown Ext Hdr 255" */
sb_add(m, "Unknown Ext Hdr %u", currenthdr);
return;
}
if (logflags & XT_LOG_IPOPT)
sb_add(m, ") ");
currenthdr = hp->nexthdr;
ptr += hdrlen;
}
switch (currenthdr) {
case IPPROTO_TCP:
if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
return;
break;
case IPPROTO_ICMPV6: {
struct icmp6hdr _icmp6h;
const struct icmp6hdr *ic;
/* Max length: 13 "PROTO=ICMPv6 " */
sb_add(m, "PROTO=ICMPv6 ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
if (ic == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
return;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
switch (ic->icmp6_type) {
case ICMPV6_ECHO_REQUEST:
case ICMPV6_ECHO_REPLY:
/* Max length: 19 "ID=65535 SEQ=65535 " */
sb_add(m, "ID=%u SEQ=%u ",
ntohs(ic->icmp6_identifier),
ntohs(ic->icmp6_sequence));
break;
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
break;
case ICMPV6_PARAMPROB:
/* Max length: 17 "POINTER=ffffffff " */
sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
/* Fall through */
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
/* Max length: 3+maxlen */
if (recurse) {
sb_add(m, "[");
dump_ipv6_packet(m, info, skb,
ptr + sizeof(_icmp6h), 0);
sb_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
}
break;
}
/* Max length: 10 "PROTO=255 " */
default:
sb_add(m, "PROTO=%u ", currenthdr);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
sb_add(m, "UID=%u GID=%u ",
skb->sk->sk_socket->file->f_cred->fsuid,
skb->sk->sk_socket->file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
}
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!recurse && skb->mark)
sb_add(m, "MARK=0x%x ", skb->mark);
}
static void dump_ipv6_mac_header(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & XT_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
sb_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int len = dev->hard_header_len;
unsigned int i;
if (dev->type == ARPHRD_SIT) {
p -= ETH_HLEN;
if (p < skb->head)
p = NULL;
}
if (p != NULL) {
sb_add(m, "%02x", *p++);
for (i = 1; i < len; i++)
sb_add(m, ":%02x", *p++);
}
sb_add(m, " ");
if (dev->type == ARPHRD_SIT) {
const struct iphdr *iph =
(struct iphdr *)skb_mac_header(skb);
sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
&iph->daddr);
}
} else
sb_add(m, " ");
}
static void
ip6t_log_packet(u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct sbuff *m = sb_open();
if (!loginfo)
loginfo = &default_loginfo;
log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_ipv6_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, 0);
dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
sb_close(m);
}
#endif
static unsigned int
log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_log_info *loginfo = par->targinfo;
const struct xt_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, &li,
loginfo->prefix);
if (par->family == NFPROTO_IPV4)
ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#if IS_ENABLED(CONFIG_IPV6)
else if (par->family == NFPROTO_IPV6)
ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#endif
else
WARN_ON_ONCE(1);
return XT_CONTINUE;
}
static int log_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_log_info *loginfo = par->targinfo;
const struct xt_log_info *loginfo = par->targinfo;
if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
return -EINVAL;
if (loginfo->level >= 8) {
pr_debug("level %u >= 8\n", loginfo->level);
return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
pr_debug("prefix is not null-terminated\n");
return -EINVAL;
}
return 0;
}
static struct xt_target log_tg_reg __read_mostly = {
static struct xt_target log_tg_regs[] __read_mostly = {
{
.name = "LOG",
.family = NFPROTO_IPV4,
.target = log_tg,
.targetsize = sizeof(struct ipt_log_info),
.targetsize = sizeof(struct xt_log_info),
.checkentry = log_tg_check,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IPV6)
{
.name = "LOG",
.family = NFPROTO_IPV6,
.target = log_tg,
.targetsize = sizeof(struct xt_log_info),
.checkentry = log_tg_check,
.me = THIS_MODULE,
},
#endif
};
static struct nf_logger ipt_log_logger __read_mostly = {
......@@ -495,22 +882,44 @@ static struct nf_logger ipt_log_logger __read_mostly = {
.me = THIS_MODULE,
};
#if IS_ENABLED(CONFIG_IPV6)
static struct nf_logger ip6t_log_logger __read_mostly = {
.name = "ip6t_LOG",
.logfn = &ip6t_log_packet,
.me = THIS_MODULE,
};
#endif
static int __init log_tg_init(void)
{
int ret;
ret = xt_register_target(&log_tg_reg);
ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
if (ret < 0)
return ret;
nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
#if IS_ENABLED(CONFIG_IPV6)
nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
#endif
return 0;
}
static void __exit log_tg_exit(void)
{
nf_log_unregister(&ipt_log_logger);
xt_unregister_target(&log_tg_reg);
#if IS_ENABLED(CONFIG_IPV6)
nf_log_unregister(&ip6t_log_logger);
#endif
xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}
module_init(log_tg_init);
module_exit(log_tg_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging");
MODULE_ALIAS("ipt_LOG");
MODULE_ALIAS("ip6t_LOG");