Commit 2eb3ed33 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for your net-next
tree; they are:

1) Speed up table replacement on busy systems with large tables
   (and many cores) in x_tables. xt_replace_table() now synchronizes by
   itself by waiting until all CPUs have seen an even seqcount, so no
   seqlock is needed when fetching the old counters, from Florian Westphal.

2) Add nf_l4proto_log_invalid() and nf_ct_l4proto_log_invalid() to speed
   up packet processing in the fast path when logging is not enabled, from
   Florian Westphal.

3) Precompute the masked address at configuration time in xt_connlimit,
   from Florian.

4) Don't use explicit size for set selection if performance set policy
   is selected.

5) Allow getting elements from an existing set in nf_tables.

6) Fix incorrect check in nft_hash_deactivate(), from Florian.

7) Cache netlink attribute size result in l4proto->nla_size, from
   Florian.

8) Handle NFPROTO_INET in nf_ct_netns_get() from conntrack core.

9) Use power efficient workqueue in conntrack garbage collector, from
   Vincent Guittot.

10) Remove an unnecessary parameter from the conntrack l4proto functions,
    also from Florian.

11) Constify struct nf_conntrack_l3proto definitions, from Florian.

12) Remove all typedefs in nf_conntrack_h323 via coccinelle semantic
    patch, from Harsha Sharma.

13) Don't store addresses in the rbtree nodes in xt_connlimit, they are
    never used, from Florian.

14) Fix out-of-bounds access in the conntrack h323 helper, patch from
    Eric Sesterhenn.

15) Print the symbol name for return addresses with %pS in IPVS, from
    Helge Deller.

16) Proc output should only display its own netns in IPVS, from
    KUWAZAWA Takuya.

17) Small clean up in size_entry_mwt(), from Colin Ian King.

18) Use test_and_clear_bit() in nf_nat_proto_clean() instead of a separate
    non-atomic test followed by clear_bit(), from Florian Westphal.

19) Consolidate prefix length maps in ipset, from Aaron Conole.

20) Fix sparse warnings in ipset, from Jozsef Kadlecsik.

21) Simplify list_set_memsize(), from simran singhal.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 118d6298 ba0e4d99
......@@ -11,7 +11,7 @@
#define _NF_CONNTRACK_IPV4_H
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
......
......@@ -2,7 +2,7 @@
#ifndef _NF_CONNTRACK_IPV6_H
#define _NF_CONNTRACK_IPV6_H
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
......
......@@ -43,7 +43,6 @@ struct nf_conntrack_l4proto {
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeouts);
/* Called when a new connection for this protocol found;
......@@ -76,7 +75,7 @@ struct nf_conntrack_l4proto {
int (*tuple_to_nlattr)(struct sk_buff *skb,
const struct nf_conntrack_tuple *t);
/* Calculate tuple nlattr size */
int (*nlattr_tuple_size)(void);
unsigned int (*nlattr_tuple_size)(void);
int (*nlattr_to_tuple)(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
......@@ -146,15 +145,27 @@ int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
int nf_ct_port_nlattr_tuple_size(void);
unsigned int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
#define LOG_INVALID(net, proto) \
((net)->ct.sysctl_log_invalid == (proto) || \
(net)->ct.sysctl_log_invalid == IPPROTO_RAW)
__printf(3, 4) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const char *fmt, ...);
__printf(5, 6) __cold
void nf_l4proto_log_invalid(const struct sk_buff *skb,
struct net *net,
u16 pf, u8 protonum,
const char *fmt, ...);
#else
static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
static inline __printf(5, 6) __cold
void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
u16 pf, u8 protonum, const char *fmt, ...) {}
static inline __printf(3, 4) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
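The two helpers split logging by context: nf_ct_l4proto_log_invalid() is for call sites that already have a conntrack entry (l3num and protonum are read from the ct), while nf_l4proto_log_invalid() covers the ->error() paths that run before a ct is attached. A minimal sketch of how a tracker wraps them, mirroring the icmp/tcp/dccp conversions later in this patchset (the udplite wrapper names are illustrative, not part of this commit):

#include <net/netfilter/nf_conntrack_l4proto.h>

/* error path: no ct exists yet, so pf and protonum are passed explicitly */
static void udplite_error_log(const struct sk_buff *skb, struct net *net,
                              u8 pf, const char *msg)
{
        nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDPLITE, "%s", msg);
}

/* packet path: the ct already carries l3num and protonum */
static void udplite_log_bad_csum(const struct sk_buff *skb,
                                 const struct nf_conn *ct)
{
        nf_ct_l4proto_log_invalid(skb, ct, "bad checksum");
}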
......@@ -312,6 +312,7 @@ struct nft_expr;
* @flush: deactivate element in the next generation
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
* @privsize: function to return size of set private data
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
......@@ -351,6 +352,10 @@ struct nft_set_ops {
void (*walk)(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_iter *iter);
void * (*get)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem,
unsigned int flags);
unsigned int (*privsize)(const struct nlattr * const nla[],
const struct nft_set_desc *desc);
......
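The new ->get() hook looks up one element by key and returns the backend's element private data, or an ERR_PTR() on failure; the GETSETELEM netlink path added elsewhere in this series dumps whatever it returns. Roughly how a caller is expected to use it (a simplified sketch with an illustrative name, not the exact nf_tables core code):

/* sketch: fetch a single element through the backend */
static int demo_get_set_elem(const struct net *net, struct nft_set *set,
                             struct nft_set_elem *elem, unsigned int flags)
{
        void *priv;

        priv = set->ops->get(net, set, elem, flags);    /* key in elem->key */
        if (IS_ERR(priv))
                return PTR_ERR(priv);                   /* typically -ENOENT */

        elem->priv = priv;      /* element is then serialized back to userspace */
        return 0;
}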
......@@ -2112,9 +2112,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
for (i = 0, j = 1 ; j < 4 ; j++, i++) {
struct compat_ebt_entry_mwt *match32;
unsigned int size;
char *buf = buf_start;
char *buf = buf_start + offsets[i];
buf = buf_start + offsets[i];
if (offsets[i] > offsets[j])
return -EINVAL;
......
......@@ -634,6 +634,25 @@ static void get_counters(const struct xt_table_info *t,
}
}
static void get_old_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct arpt_entry *iter;
unsigned int cpu, i;
for_each_possible_cpu(cpu) {
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
struct xt_counters *tmp;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
++i;
}
cond_resched();
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
......@@ -910,8 +929,7 @@ static int __do_replace(struct net *net, const char *name,
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
/* Get the old counters, and synchronize with replace */
get_counters(oldinfo, counters);
get_old_counters(oldinfo, counters);
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries;
......
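get_old_counters() (duplicated below for ip_tables and ip6_tables) can walk the per-cpu counters without taking the xt_recseq seqcount because xt_replace_table(), in net/netfilter/x_tables.c and not part of this excerpt, now waits for every CPU to finish any traversal of the just-replaced table first. A simplified sketch of that wait, assuming it matches the x_tables change this merge pulls in:

/* sketch: after swapping in the new table, wait until no CPU is still
 * inside the packet-path traverser of the old one (odd seqcount)
 */
for_each_possible_cpu(cpu) {
        seqcount_t *s = &per_cpu(xt_recseq, cpu);
        u32 seq = raw_read_seqcount(s);

        if (seq & 1) {
                do {
                        cond_resched();
                        cpu_relax();
                } while (seq == raw_read_seqcount(s));
        }
}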
......@@ -781,6 +781,26 @@ get_counters(const struct xt_table_info *t,
}
}
static void get_old_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct ipt_entry *iter;
unsigned int cpu, i;
for_each_possible_cpu(cpu) {
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
const struct xt_counters *tmp;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
++i; /* macro does multi eval of i */
}
cond_resched();
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
......@@ -1070,8 +1090,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
/* Get the old counters, and synchronize with replace */
get_counters(oldinfo, counters);
get_old_counters(oldinfo, counters);
/* Decrease module usage counts and free resource */
xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
......
......@@ -344,7 +344,7 @@ static void ipv4_hooks_unregister(struct net *net)
mutex_unlock(&register_ipv4_hooks);
}
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
.l3proto = PF_INET,
.pkt_to_tuple = ipv4_pkt_to_tuple,
.invert_tuple = ipv4_invert_tuple,
......
......@@ -81,7 +81,6 @@ static int icmp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeout)
{
/* Do not immediately delete the connection after the first
......@@ -165,6 +164,12 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return NF_ACCEPT;
}
static void icmp_error_log(const struct sk_buff *skb, struct net *net,
u8 pf, const char *msg)
{
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMP, "%s", msg);
}
/* Small and modified version of icmp_rcv */
static int
icmp_error(struct net *net, struct nf_conn *tmpl,
......@@ -177,18 +182,14 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
/* Not enough header? */
icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
if (icmph == NULL) {
if (LOG_INVALID(net, IPPROTO_ICMP))
nf_log_packet(net, PF_INET, 0, skb, NULL, NULL,
NULL, "nf_ct_icmp: short packet ");
icmp_error_log(skb, net, pf, "short packet");
return -NF_ACCEPT;
}
/* See ip_conntrack_proto_tcp.c */
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_ip_checksum(skb, hooknum, dataoff, 0)) {
if (LOG_INVALID(net, IPPROTO_ICMP))
nf_log_packet(net, PF_INET, 0, skb, NULL, NULL, NULL,
"nf_ct_icmp: bad HW ICMP checksum ");
icmp_error_log(skb, net, pf, "bad hw icmp checksum");
return -NF_ACCEPT;
}
......@@ -199,9 +200,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
* discarded.
*/
if (icmph->type > NR_ICMP_TYPES) {
if (LOG_INVALID(net, IPPROTO_ICMP))
nf_log_packet(net, PF_INET, 0, skb, NULL, NULL, NULL,
"nf_ct_icmp: invalid ICMP type ");
icmp_error_log(skb, net, pf, "invalid icmp type");
return -NF_ACCEPT;
}
......@@ -259,9 +258,14 @@ static int icmp_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
static int icmp_nlattr_tuple_size(void)
static unsigned int icmp_nlattr_tuple_size(void)
{
return nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
......
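The unlocked lazy initialization above is safe because nla_policy_len() is a pure function of a fixed policy, so concurrent first callers can only race to store the same value. Spelled out with comments (an annotated restatement of icmp_nlattr_tuple_size(), not new kernel code):

static unsigned int demo_icmp_tuple_size(void)
{
        static unsigned int size __read_mostly; /* 0 until first use */

        if (!size)      /* unlocked check: a race only rewrites the same value */
                size = nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);

        return size;    /* cached result, no per-message recomputation */
}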
......@@ -800,6 +800,25 @@ get_counters(const struct xt_table_info *t,
}
}
static void get_old_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct ip6t_entry *iter;
unsigned int cpu, i;
for_each_possible_cpu(cpu) {
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
const struct xt_counters *tmp;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
++i;
}
cond_resched();
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
......@@ -1090,8 +1109,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
/* Get the old counters, and synchronize with replace */
get_counters(oldinfo, counters);
get_old_counters(oldinfo, counters);
/* Decrease module usage counts and free resource */
xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
......
......@@ -339,7 +339,7 @@ static void ipv6_hooks_unregister(struct net *net)
mutex_unlock(&register_ipv6_hooks);
}
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
.l3proto = PF_INET6,
.pkt_to_tuple = ipv6_pkt_to_tuple,
.invert_tuple = ipv6_invert_tuple,
......
......@@ -94,7 +94,6 @@ static int icmpv6_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeout)
{
/* Do not immediately delete the connection after the first
......@@ -176,6 +175,12 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
static void icmpv6_error_log(const struct sk_buff *skb, struct net *net,
u8 pf, const char *msg)
{
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMPV6, "%s", msg);
}
static int
icmpv6_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
......@@ -187,17 +192,13 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmp6h == NULL) {
if (LOG_INVALID(net, IPPROTO_ICMPV6))
nf_log_packet(net, PF_INET6, 0, skb, NULL, NULL, NULL,
"nf_ct_icmpv6: short packet ");
icmpv6_error_log(skb, net, pf, "short packet");
return -NF_ACCEPT;
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
if (LOG_INVALID(net, IPPROTO_ICMPV6))
nf_log_packet(net, PF_INET6, 0, skb, NULL, NULL, NULL,
"nf_ct_icmpv6: ICMPv6 checksum failed ");
icmpv6_error_log(skb, net, pf, "ICMPv6 checksum failed");
return -NF_ACCEPT;
}
......@@ -258,9 +259,14 @@ static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
static int icmpv6_nlattr_tuple_size(void)
static unsigned int icmpv6_nlattr_tuple_size(void)
{
return nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
......
......@@ -434,7 +434,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
......
......@@ -454,7 +454,6 @@ static size_t
list_set_memsize(const struct list_set *map, size_t dsize)
{
struct set_elem *e;
size_t memsize;
u32 n = 0;
rcu_read_lock();
......@@ -462,9 +461,7 @@ list_set_memsize(const struct list_set *map, size_t dsize)
n++;
rcu_read_unlock();
memsize = sizeof(*map) + n * dsize;
return memsize;
return (sizeof(*map) + n * dsize);
}
static int
......
......@@ -3,6 +3,141 @@
/* Prefixlen maps for fast conversions, by Jan Engelhardt. */
#ifdef E
#undef E
#endif
#define PREFIXES_MAP \
E(0x00000000, 0x00000000, 0x00000000, 0x00000000), \
E(0x80000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xC0000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xE0000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xF0000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xF8000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFC000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFE000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFF000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFF800000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
#define E(a, b, c, d) \
{.ip6 = { \
htonl(a), htonl(b), \
......@@ -13,135 +148,7 @@
* just use prefixlen_netmask_map[prefixlength].ip.
*/
const union nf_inet_addr ip_set_netmask_map[] = {
E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
PREFIXES_MAP
};
EXPORT_SYMBOL_GPL(ip_set_netmask_map);
......@@ -155,135 +162,7 @@ EXPORT_SYMBOL_GPL(ip_set_netmask_map);
* just use prefixlen_hostmask_map[prefixlength].ip.
*/
const union nf_inet_addr ip_set_hostmask_map[] = {
E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
PREFIXES_MAP
};
EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
......
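Both exported tables are indexed directly by prefix length (0..128), and because union nf_inet_addr overlays .ip on the first word of .ip6, one row serves both address families, as the comment above notes ("just use prefixlen_netmask_map[prefixlength].ip"). An illustrative accessor, not part of this commit:

#include <linux/netfilter.h>                    /* union nf_inet_addr */
#include <linux/netfilter/ipset/pfxlen.h>       /* ip_set_netmask_map */

static __be32 demo_v4_netmask(u8 pfxlen)        /* pfxlen <= 32 */
{
        /* e.g. pfxlen 24 -> htonl(0xFFFFFF00) == 255.255.255.0 */
        return ip_set_netmask_map[pfxlen].ip;
}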
......@@ -185,7 +185,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
ret = 1;
} else {
pr_err("%s(): request for already hashed, called from %pF\n",
pr_err("%s(): request for already hashed, called from %pS\n",
__func__, __builtin_return_address(0));
ret = 0;
}
......
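%pF dereferences a function-descriptor pointer, which only exists on ia64, ppc64 (ELFv1) and parisc64; for a plain text address such as the saved return address used here, %pS is the correct symbolic format. The pattern in isolation (demo_report_caller is an illustrative name):

#include <linux/printk.h>

static noinline void demo_report_caller(void)
{
        /* prints something like "ip_vs_conn_hash+0x1f4/0x230" */
        pr_info("called from %pS\n", __builtin_return_address(0));
}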
......@@ -300,7 +300,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
unsigned int hash;
if (svc->flags & IP_VS_SVC_F_HASHED) {
pr_err("%s(): request for already hashed, called from %pF\n",
pr_err("%s(): request for already hashed, called from %pS\n",
__func__, __builtin_return_address(0));
return 0;
}
......@@ -334,7 +334,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
{
if (!(svc->flags & IP_VS_SVC_F_HASHED)) {
pr_err("%s(): request for unhash flagged, called from %pF\n",
pr_err("%s(): request for unhash flagged, called from %pS\n",
__func__, __builtin_return_address(0));
return 0;
}
......@@ -2034,12 +2034,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
seq_puts(seq,
" -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
} else {
struct net *net = seq_file_net(seq);
struct netns_ipvs *ipvs = net_ipvs(net);
const struct ip_vs_service *svc = v;
const struct ip_vs_iter *iter = seq->private;
const struct ip_vs_dest *dest;
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
char *sched_name = sched ? sched->name : "none";
if (svc->ipvs != ipvs)
return 0;
if (iter->table == ip_vs_svc_table) {
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
......
......@@ -1083,7 +1083,7 @@ static void gc_worker(struct work_struct *work)
next_run = gc_work->next_gc_run;
gc_work->last_bucket = i;
gc_work->early_drop = false;
queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
......@@ -1419,7 +1419,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
/* Decide what timeout policy we want to apply to this flow. */
timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, timeouts);
ret = l4proto->packet(ct, skb, dataoff, ctinfo, timeouts);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
......@@ -1563,9 +1563,14 @@ int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
int nf_ct_port_nlattr_tuple_size(void)
unsigned int nf_ct_port_nlattr_tuple_size(void)
{
return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
......@@ -2084,7 +2089,7 @@ int nf_conntrack_init_start(void)
goto err_proto;
conntrack_gc_work_init(&conntrack_gc_work);
queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
return 0;
......
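system_power_efficient_wq behaves exactly like system_wq unless power-efficient workqueues are enabled (CONFIG_WQ_POWER_EFFICIENT_DEFAULT or the workqueue.power_efficient boot parameter), in which case it is unbound so the scheduler can run the garbage collector on a CPU that is already awake. The queuing pattern in isolation (names are illustrative):

#include <linux/workqueue.h>

static void demo_gc_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_gc, demo_gc_fn);

static void demo_gc_fn(struct work_struct *work)
{
        /* ... scan a slice of the conntrack table ... */

        /* re-arm; only the target workqueue differs from the old code */
        queue_delayed_work(system_power_efficient_wq, &demo_gc, HZ);
}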
......@@ -91,41 +91,41 @@ typedef struct field_t {
} field_t;
/* Bit Stream */
typedef struct {
struct bitstr {
unsigned char *buf;
unsigned char *beg;
unsigned char *end;
unsigned char *cur;
unsigned int bit;
} bitstr_t;
};
/* Tool Functions */
#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
static unsigned int get_len(bitstr_t *bs);
static unsigned int get_bit(bitstr_t *bs);
static unsigned int get_bits(bitstr_t *bs, unsigned int b);
static unsigned int get_bitmap(bitstr_t *bs, unsigned int b);
static unsigned int get_uint(bitstr_t *bs, int b);
static unsigned int get_len(struct bitstr *bs);
static unsigned int get_bit(struct bitstr *bs);
static unsigned int get_bits(struct bitstr *bs, unsigned int b);
static unsigned int get_bitmap(struct bitstr *bs, unsigned int b);
static unsigned int get_uint(struct bitstr *bs, int b);
/* Decoder Functions */
static int decode_nul(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_bool(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_oid(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_int(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_enum(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_bitstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_numstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_octstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_bmpstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_seq(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_seqof(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_choice(bitstr_t *bs, const struct field_t *f, char *base, int level);
static int decode_nul(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_bool(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_oid(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_int(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_enum(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_bitstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_numstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_octstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_seq(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_seqof(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_choice(struct bitstr *bs, const struct field_t *f, char *base, int level);
/* Decoder Functions Vector */
typedef int (*decoder_t)(bitstr_t *, const struct field_t *, char *, int);
typedef int (*decoder_t)(struct bitstr *, const struct field_t *, char *, int);
static const decoder_t Decoders[] = {
decode_nul,
decode_bool,
......@@ -150,7 +150,7 @@ static const decoder_t Decoders[] = {
* Functions
****************************************************************************/
/* Assume bs is aligned && v < 16384 */
static unsigned int get_len(bitstr_t *bs)
static unsigned int get_len(struct bitstr *bs)
{
unsigned int v;
......@@ -166,7 +166,7 @@ static unsigned int get_len(bitstr_t *bs)
}
/****************************************************************************/
static unsigned int get_bit(bitstr_t *bs)
static unsigned int get_bit(struct bitstr *bs)
{
unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
......@@ -177,7 +177,7 @@ static unsigned int get_bit(bitstr_t *bs)
/****************************************************************************/
/* Assume b <= 8 */
static unsigned int get_bits(bitstr_t *bs, unsigned int b)
static unsigned int get_bits(struct bitstr *bs, unsigned int b)
{
unsigned int v, l;
......@@ -203,7 +203,7 @@ static unsigned int get_bits(bitstr_t *bs, unsigned int b)
/****************************************************************************/
/* Assume b <= 32 */
static unsigned int get_bitmap(bitstr_t *bs, unsigned int b)
static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
{
unsigned int v, l, shift, bytes;
......@@ -242,7 +242,7 @@ static unsigned int get_bitmap(bitstr_t *bs, unsigned int b)
/****************************************************************************
* Assume bs is aligned and sizeof(unsigned int) == 4
****************************************************************************/
static unsigned int get_uint(bitstr_t *bs, int b)
static unsigned int get_uint(struct bitstr *bs, int b)
{
unsigned int v = 0;
......@@ -264,7 +264,7 @@ static unsigned int get_uint(bitstr_t *bs, int b)
}
/****************************************************************************/
static int decode_nul(bitstr_t *bs, const struct field_t *f,
static int decode_nul(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
......@@ -273,7 +273,7 @@ static int decode_nul(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_bool(bitstr_t *bs, const struct field_t *f,
static int decode_bool(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
......@@ -285,7 +285,7 @@ static int decode_bool(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_oid(bitstr_t *bs, const struct field_t *f,
static int decode_oid(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
int len;
......@@ -302,7 +302,7 @@ static int decode_oid(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_int(bitstr_t *bs, const struct field_t *f,
static int decode_int(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
......@@ -346,7 +346,7 @@ static int decode_int(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_enum(bitstr_t *bs, const struct field_t *f,
static int decode_enum(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
......@@ -362,7 +362,7 @@ static int decode_enum(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_bitstr(bitstr_t *bs, const struct field_t *f,
static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
......@@ -396,7 +396,7 @@ static int decode_bitstr(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_numstr(bitstr_t *bs, const struct field_t *f,
static int decode_numstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
......@@ -414,7 +414,7 @@ static int decode_numstr(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_octstr(bitstr_t *bs, const struct field_t *f,
static int decode_octstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
......@@ -463,7 +463,7 @@ static int decode_octstr(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_bmpstr(bitstr_t *bs, const struct field_t *f,
static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
......@@ -489,7 +489,7 @@ static int decode_bmpstr(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_seq(bitstr_t *bs, const struct field_t *f,
static int decode_seq(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int ext, bmp, i, opt, len = 0, bmp2, bmp2_len;
......@@ -606,7 +606,7 @@ static int decode_seq(bitstr_t *bs, const struct field_t *f,
}
/****************************************************************************/
static int decode_seqof(bitstr_t *bs, const struct field_t *f,
static int decode_seqof(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int count, effective_count = 0, i, len = 0;
......@@ -696,7 +696,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
/****************************************************************************/
static int decode_choice(bitstr_t *bs, const struct field_t *f,
static int decode_choice(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int type, ext, len = 0;
......@@ -772,7 +772,7 @@ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
FNAME("RasMessage") CHOICE, 5, 24, 32, DECODE | EXT,
0, _RasMessage
};
bitstr_t bs;
struct bitstr bs;
bs.buf = bs.beg = bs.cur = buf;
bs.end = buf + sz;
......@@ -789,7 +789,7 @@ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
FNAME("H323-UserInformation") SEQ, 1, 2, 2, DECODE | EXT,
0, _H323_UserInformation
};
bitstr_t bs;
struct bitstr bs;
bs.buf = buf;
bs.beg = bs.cur = beg;
......@@ -808,7 +808,7 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
FNAME("MultimediaSystemControlMessage") CHOICE, 2, 4, 4,
DECODE | EXT, 0, _MultimediaSystemControlMessage
};
bitstr_t bs;
struct bitstr bs;
bs.buf = bs.beg = bs.cur = buf;
bs.end = buf + sz;
......@@ -877,6 +877,7 @@ int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
if (sz < 1)
break;
len = *p++;
sz--;
if (sz < len)
break;
p += len;
......
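The added sz-- is the out-of-bounds fix from the summary (item 14): the length byte consumed by "len = *p++" was never subtracted from the remaining size, so the following "sz < len" check was off by one and "p += len" could step past the end of the buffer. The invariant the fix restores, shown as a generic id/length/value walk rather than the exact DecodeQ931 loop:

static void demo_walk_elements(const unsigned char *p, size_t sz)
{
        while (sz >= 2) {
                unsigned char id = *p++;
                unsigned char len;

                sz--;
                len = *p++;
                sz--;                   /* account for the length byte itself */
                if (sz < len)           /* otherwise p += len overruns the buffer */
                        break;
                p += len;
                sz -= len;
                (void)id;               /* a real parser would dispatch on id */
        }
}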
......@@ -533,11 +533,11 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
return -1;
}
static inline size_t ctnetlink_proto_size(const struct nf_conn *ct)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
size_t len;
size_t len, len4 = 0;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
len = l3proto->nla_size;
......@@ -545,8 +545,12 @@ static inline size_t ctnetlink_proto_size(const struct nf_conn *ct)
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
len += l4proto->nla_size;
if (l4proto->nlattr_tuple_size) {
len4 = l4proto->nlattr_tuple_size();
len4 *= 3u; /* ORIG, REPLY, MASTER */
}
return len;
return len + len4;
}
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
......
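The 3u multiplier is the consumer of the tuple attribute size that used to be folded into l4proto->nla_size at registration time (that accounting is removed from nf_ct_l4proto_register_one() further down): a conntrack message can carry an original, a reply and a master tuple. Restated as an illustrative helper (demo_proto_size is not part of this commit):

static size_t demo_proto_size(const struct nf_conntrack_l3proto *l3proto,
                              const struct nf_conntrack_l4proto *l4proto)
{
        size_t tuple = 0;

        if (l4proto->nlattr_tuple_size)
                tuple = l4proto->nlattr_tuple_size();

        return l3proto->nla_size        /* address attributes            */
             + l4proto->nla_size        /* per-protocol state attributes */
             + 3u * tuple;              /* CTA_TUPLE_ORIG/REPLY/MASTER   */
}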
......@@ -27,6 +27,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_log.h>
static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly;
struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO] __read_mostly;
......@@ -63,6 +64,52 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header,
*header = NULL;
*table = NULL;
}
__printf(5, 6)
void nf_l4proto_log_invalid(const struct sk_buff *skb,
struct net *net,
u16 pf, u8 protonum,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (net->ct.sysctl_log_invalid != protonum &&
net->ct.sysctl_log_invalid != IPPROTO_RAW)
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_proto_%d: %pV ", protonum, &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(nf_l4proto_log_invalid);
__printf(3, 4)
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const char *fmt, ...)
{
struct va_format vaf;
struct net *net;
va_list args;
net = nf_ct_net(ct);
if (likely(net->ct.sysctl_log_invalid == 0))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_l4proto_log_invalid(skb, net, nf_ct_l3num(ct),
nf_ct_protonum(ct), "%pV", &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid);
#endif
const struct nf_conntrack_l4proto *
......@@ -125,7 +172,7 @@ void nf_ct_l3proto_module_put(unsigned short l3proto)
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
int nf_ct_netns_get(struct net *net, u8 nfproto)
static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
{
const struct nf_conntrack_l3proto *l3proto;
int ret;
......@@ -150,9 +197,33 @@ int nf_ct_netns_get(struct net *net, u8 nfproto)
return ret;
}
int nf_ct_netns_get(struct net *net, u8 nfproto)
{
int err;
if (nfproto == NFPROTO_INET) {
err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
if (err < 0)
goto err1;
err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
if (err < 0)
goto err2;
} else {
err = nf_ct_netns_do_get(net, nfproto);
if (err < 0)
goto err1;
}
return 0;
err2:
nf_ct_netns_put(net, NFPROTO_IPV4);
err1:
return err;
}
EXPORT_SYMBOL_GPL(nf_ct_netns_get);
void nf_ct_netns_put(struct net *net, u8 nfproto)
static void nf_ct_netns_do_put(struct net *net, u8 nfproto)
{
const struct nf_conntrack_l3proto *l3proto;
......@@ -171,6 +242,15 @@ void nf_ct_netns_put(struct net *net, u8 nfproto)
nf_ct_l3proto_module_put(nfproto);
}
void nf_ct_netns_put(struct net *net, uint8_t nfproto)
{
if (nfproto == NFPROTO_INET) {
nf_ct_netns_do_put(net, NFPROTO_IPV4);
nf_ct_netns_do_put(net, NFPROTO_IPV6);
} else
nf_ct_netns_do_put(net, nfproto);
}
EXPORT_SYMBOL_GPL(nf_ct_netns_put);
const struct nf_conntrack_l4proto *
......@@ -351,8 +431,6 @@ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
l4proto->nla_size = 0;
if (l4proto->nlattr_size)
l4proto->nla_size += l4proto->nlattr_size();
if (l4proto->nlattr_tuple_size)
l4proto->nla_size += 3 * l4proto->nlattr_tuple_size();
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
l4proto);
......
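With NFPROTO_INET handled inside nf_ct_netns_get()/nf_ct_netns_put(), conntrack-dependent code attached to an inet-family hook no longer has to split the request into IPv4 and IPv6 itself. A sketch of the calling convention (helper names are illustrative):

static int demo_ct_hold(struct net *net, u8 family)
{
        /* family may be NFPROTO_IPV4, NFPROTO_IPV6 or NFPROTO_INET; the
         * inet case grabs (and on error rolls back) both v4 and v6
         */
        return nf_ct_netns_get(net, family);
}

static void demo_ct_release(struct net *net, u8 family)
{
        nf_ct_netns_put(net, family);   /* mirrors get: drops both for inet */
}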
......@@ -428,13 +428,13 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
msg = "not picking up existing connection ";
goto out_invalid;
}
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
msg = "invalid state transition ";
goto out_invalid;
}
......@@ -447,9 +447,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
nf_ct_l4proto_log_invalid(skb, ct, "%s", msg);
return false;
}
......@@ -469,10 +467,8 @@ static unsigned int *dccp_get_timeouts(struct net *net)
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
......@@ -534,15 +530,11 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.dccp.last_pkt = type;
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid packet ignored ");
nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid packet");
return NF_ACCEPT;
case CT_DCCP_INVALID:
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid state transition ");
nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid state transition");
return -NF_ACCEPT;
}
......@@ -604,8 +596,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_DCCP, "%s", msg);
return -NF_ACCEPT;
}
......
......@@ -60,7 +60,6 @@ static int generic_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeout)
{
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
......
......@@ -244,7 +244,6 @@ static int gre_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is a GRE connection.
......
......@@ -306,7 +306,6 @@ static int sctp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeouts)
{
enum sctp_conntrack new_state, old_state;
......@@ -522,8 +521,7 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
}
return NF_ACCEPT;
out_invalid:
if (LOG_INVALID(net, IPPROTO_SCTP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", logmsg);
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_SCTP, "%s", logmsg);
return -NF_ACCEPT;
}
......
......@@ -493,8 +493,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
unsigned int index,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
u_int8_t pf)
const struct tcphdr *tcph)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = tcp_pernet(net);
......@@ -702,9 +701,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
tn->tcp_be_liberal)
res = true;
if (!res && LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: %s ",
if (!res) {
nf_ct_l4proto_log_invalid(skb, ct,
"%s",
before(seq, sender->td_maxend + 1) ?
in_recv_win ?
before(sack, receiver->td_end + 1) ?
......@@ -713,6 +712,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
: "SEQ is over the upper bound (over the window of the receiver)");
}
}
pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
......@@ -738,6 +738,12 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
[TCPHDR_ACK|TCPHDR_URG] = 1,
};
static void tcp_error_log(const struct sk_buff *skb, struct net *net,
u8 pf, const char *msg)
{
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_TCP, "%s", msg);
}
/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
static int tcp_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
......@@ -753,17 +759,13 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
/* Smaller that minimal TCP header? */
th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
if (th == NULL) {
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: short packet ");
tcp_error_log(skb, net, pf, "short packet");
return -NF_ACCEPT;
}
/* Not whole TCP header or malformed packet */
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: truncated/malformed packet ");
tcp_error_log(skb, net, pf, "truncated packet");
return -NF_ACCEPT;
}
......@@ -774,18 +776,14 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
/* FIXME: Source route IP option packets --RR */
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: bad TCP checksum ");
tcp_error_log(skb, net, pf, "bad checksum");
return -NF_ACCEPT;
}
/* Check TCP flags. */
tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: invalid TCP flag combination ");
tcp_error_log(skb, net, pf, "invalid tcp flag combination");
return -NF_ACCEPT;
}
......@@ -802,7 +800,6 @@ static int tcp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
......@@ -939,10 +936,8 @@ static int tcp_packet(struct nf_conn *ct,
IP_CT_EXP_CHALLENGE_ACK;
}
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: invalid packet ignored in "
"state %s ", tcp_conntrack_names[old_state]);
nf_ct_l4proto_log_invalid(skb, ct, "invalid packet ignored in "
"state %s ", tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
/* Special case for SYN proxy: when the SYN to the server or
......@@ -964,9 +959,7 @@ static int tcp_packet(struct nf_conn *ct,
pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
dir, get_conntrack_index(th), old_state);
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: invalid state ");
nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
return -NF_ACCEPT;
case TCP_CONNTRACK_TIME_WAIT:
/* RFC5961 compliance cause stack to send "challenge-ACK"
......@@ -981,9 +974,7 @@ static int tcp_packet(struct nf_conn *ct,
/* Detected RFC5961 challenge ACK */
ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: challenge-ACK ignored ");
nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
return NF_ACCEPT; /* Don't change state */
}
break;
......@@ -993,9 +984,7 @@ static int tcp_packet(struct nf_conn *ct,
&& before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
/* Invalid RST */
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL,
NULL, "nf_ct_tcp: invalid RST ");
nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
return -NF_ACCEPT;
}
if (index == TCP_RST_SET
......@@ -1022,7 +1011,7 @@ static int tcp_packet(struct nf_conn *ct,
}
if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
skb, dataoff, th, pf)) {
skb, dataoff, th)) {
spin_unlock_bh(&ct->lock);
return -NF_ACCEPT;
}
......@@ -1288,9 +1277,14 @@ static int tcp_nlattr_size(void)
+ nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
}
static int tcp_nlattr_tuple_size(void)
static unsigned int tcp_nlattr_tuple_size(void)
{
return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
......
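As an aside to the hunk above (not part of the patch): tcp_nlattr_tuple_size() now computes the policy length once and caches it in a static, since nla_policy_len() always returns the same value for a fixed policy; the other l4 protocols get the same treatment in this series. A minimal user-space sketch of that memoization pattern (names invented for the sketch):

#include <stdio.h>

/* Stand-in for a deterministic but non-trivial size computation. */
static unsigned int compute_size(void)
{
	puts("computing...");	/* printed only on the first call */
	return 128;
}

static unsigned int cached_size(void)
{
	static unsigned int size;	/* 0 means "not computed yet" */

	if (!size)
		size = compute_size();
	return size;
}

int main(void)
{
	printf("%u\n", cached_size());
	printf("%u\n", cached_size());	/* second call is served from the cache */
	return 0;
}

The pattern relies on the computed value never legitimately being zero; a racing first call at worst recomputes the same constant.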
......@@ -73,7 +73,6 @@ static int udp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
......@@ -99,6 +98,12 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
}
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
static void udplite_error_log(const struct sk_buff *skb, struct net *net,
u8 pf, const char *msg)
{
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDPLITE, "%s", msg);
}
static int udplite_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
......@@ -112,9 +117,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
/* Header is too small? */
hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (!hdr) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: short packet ");
udplite_error_log(skb, net, pf, "short packet");
return -NF_ACCEPT;
}
......@@ -122,17 +125,13 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
if (cscov == 0) {
cscov = udplen;
} else if (cscov < sizeof(*hdr) || cscov > udplen) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: invalid checksum coverage ");
udplite_error_log(skb, net, pf, "invalid checksum coverage");
return -NF_ACCEPT;
}
/* UDPLITE mandates checksums */
if (!hdr->check) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: checksum missing ");
udplite_error_log(skb, net, pf, "checksum missing");
return -NF_ACCEPT;
}
......@@ -140,9 +139,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
pf)) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: bad UDPLite checksum ");
udplite_error_log(skb, net, pf, "bad checksum");
return -NF_ACCEPT;
}
......@@ -150,6 +147,12 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
}
#endif
static void udp_error_log(const struct sk_buff *skb, struct net *net,
u8 pf, const char *msg)
{
nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDP, "%s", msg);
}
static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
unsigned int dataoff,
u_int8_t pf,
......@@ -162,17 +165,13 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
/* Header is too small? */
hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hdr == NULL) {
if (LOG_INVALID(net, IPPROTO_UDP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udp: short packet ");
udp_error_log(skb, net, pf, "short packet");
return -NF_ACCEPT;
}
/* Truncated/malformed packets */
if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
if (LOG_INVALID(net, IPPROTO_UDP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udp: truncated/malformed packet ");
udp_error_log(skb, net, pf, "truncated/malformed packet");
return -NF_ACCEPT;
}
......@@ -186,9 +185,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
* FIXME: Source route IP option packets --RR */
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) {
if (LOG_INVALID(net, IPPROTO_UDP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udp: bad UDP checksum ");
udp_error_log(skb, net, pf, "bad checksum");
return -NF_ACCEPT;
}
......
......@@ -542,17 +542,14 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
if (nf_nat_proto_remove(ct, data))
return 1;
if ((ct->status & IPS_SRC_NAT_DONE) == 0)
return 0;
/* This netns is being destroyed, and conntrack has nat null binding.
/* This module is being removed and conntrack has nat null binding.
* Remove it from bysource hash, as the table will be freed soon.
*
* Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
* will delete entry from already-freed table.
*/
clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
__nf_nat_cleanup_conntrack(ct);
if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
__nf_nat_cleanup_conntrack(ct);
/* don't delete conntrack. Although that would make things a lot
* simpler, we'd end up flushing all conntracks on nat rmmod.
......
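As an aside to the hunk above (not part of the patch): replacing the separate status test and clear_bit() with test_and_clear_bit() makes the check and the clear one atomic read-modify-write, so __nf_nat_cleanup_conntrack() only runs for an entry whose bit this caller actually cleared. A rough user-space analogue of that idea using C11 atomics (this shows the read-modify-write shape only, not the kernel bitops API):

#include <stdatomic.h>
#include <stdio.h>

#define SRC_NAT_DONE (1u << 7)	/* arbitrary bit chosen for the sketch */

static atomic_uint status = SRC_NAT_DONE;

/* Atomically clear the bit and report whether it was previously set. */
static int test_and_clear(atomic_uint *word, unsigned int bit)
{
	unsigned int old = atomic_fetch_and(word, ~bit);

	return (old & bit) != 0;
}

int main(void)
{
	if (test_and_clear(&status, SRC_NAT_DONE))
		puts("bit was set here, run cleanup exactly once");
	if (!test_and_clear(&status, SRC_NAT_DONE))
		puts("a second caller sees the bit already cleared");
	return 0;
}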
......@@ -2549,14 +2549,9 @@ nft_select_set_ops(const struct nft_ctx *ctx,
case NFT_SET_POL_PERFORMANCE:
if (est.lookup < best.lookup)
break;
if (est.lookup == best.lookup) {
if (!desc->size) {
if (est.space < best.space)
break;
} else if (est.size < best.size) {
break;
}
}
if (est.lookup == best.lookup &&
est.space < best.space)
break;
continue;
case NFT_SET_POL_MEMORY:
if (!desc->size) {
......@@ -3593,45 +3588,6 @@ static int nf_tables_dump_set_done(struct netlink_callback *cb)
return 0;
}
static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
struct netlink_ext_ack *extack)
{
u8 genmask = nft_genmask_cur(net);
const struct nft_set *set;
struct nft_ctx ctx;
int err;
err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask);
if (err < 0)
return err;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nf_tables_dump_set,
.done = nf_tables_dump_set_done,
};
struct nft_set_dump_ctx *dump_ctx;
dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL);
if (!dump_ctx)
return -ENOMEM;
dump_ctx->set = set;
dump_ctx->ctx = ctx;
c.data = dump_ctx;
return netlink_dump_start(nlsk, skb, nlh, &c);
}
return -EOPNOTSUPP;
}
static int nf_tables_fill_setelem_info(struct sk_buff *skb,
const struct nft_ctx *ctx, u32 seq,
u32 portid, int event, u16 flags,
......@@ -3677,6 +3633,135 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
return -1;
}
static int nft_setelem_parse_flags(const struct nft_set *set,
const struct nlattr *attr, u32 *flags)
{
if (attr == NULL)
return 0;
*flags = ntohl(nla_get_be32(attr));
if (*flags & ~NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
if (!(set->flags & NFT_SET_INTERVAL) &&
*flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
return 0;
}
static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
const struct nft_set_ext *ext;
struct nft_data_desc desc;
struct nft_set_elem elem;
struct sk_buff *skb;
uint32_t flags = 0;
void *priv;
int err;
err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
return err;
if (!nla[NFTA_SET_ELEM_KEY])
return -EINVAL;
err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
if (err < 0)
return err;
err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
return err;
err = -EINVAL;
if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
return err;
priv = set->ops->get(ctx->net, set, &elem, flags);
if (IS_ERR(priv))
return PTR_ERR(priv);
elem.priv = priv;
ext = nft_set_elem_ext(set, &elem);
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err1;
err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
NFT_MSG_NEWSETELEM, 0, set, &elem);
if (err < 0)
goto err2;
err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
/* This avoids a loop in nfnetlink. */
if (err < 0)
goto err1;
return 0;
err2:
kfree_skb(skb);
err1:
/* this avoids a loop in nfnetlink. */
return err == -EAGAIN ? -ENOBUFS : err;
}
static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
struct netlink_ext_ack *extack)
{
u8 genmask = nft_genmask_cur(net);
struct nft_set *set;
struct nlattr *attr;
struct nft_ctx ctx;
int rem, err = 0;
err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask);
if (err < 0)
return err;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nf_tables_dump_set,
.done = nf_tables_dump_set_done,
};
struct nft_set_dump_ctx *dump_ctx;
dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL);
if (!dump_ctx)
return -ENOMEM;
dump_ctx->set = set;
dump_ctx->ctx = ctx;
c.data = dump_ctx;
return netlink_dump_start(nlsk, skb, nlh, &c);
}
if (!nla[NFTA_SET_ELEM_LIST_ELEMENTS])
return -EINVAL;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_get_set_elem(&ctx, set, attr);
if (err < 0)
break;
}
return err;
}
static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nft_set_elem *elem,
......@@ -3777,22 +3862,6 @@ static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem)
kfree(elem);
}
static int nft_setelem_parse_flags(const struct nft_set *set,
const struct nlattr *attr, u32 *flags)
{
if (attr == NULL)
return 0;
*flags = ntohl(nla_get_be32(attr));
if (*flags & ~NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
if (!(set->flags & NFT_SET_INTERVAL) &&
*flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
return 0;
}
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
......
......@@ -312,39 +312,6 @@ static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
[NFTA_CT_SREG] = { .type = NLA_U32 },
};
static int nft_ct_netns_get(struct net *net, uint8_t family)
{
int err;
if (family == NFPROTO_INET) {
err = nf_ct_netns_get(net, NFPROTO_IPV4);
if (err < 0)
goto err1;
err = nf_ct_netns_get(net, NFPROTO_IPV6);
if (err < 0)
goto err2;
} else {
err = nf_ct_netns_get(net, family);
if (err < 0)
goto err1;
}
return 0;
err2:
nf_ct_netns_put(net, NFPROTO_IPV4);
err1:
return err;
}
static void nft_ct_netns_put(struct net *net, uint8_t family)
{
if (family == NFPROTO_INET) {
nf_ct_netns_put(net, NFPROTO_IPV4);
nf_ct_netns_put(net, NFPROTO_IPV6);
} else
nf_ct_netns_put(net, family);
}
#ifdef CONFIG_NF_CONNTRACK_ZONES
static void nft_ct_tmpl_put_pcpu(void)
{
......@@ -489,7 +456,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
err = nft_ct_netns_get(ctx->net, ctx->afi->family);
err = nf_ct_netns_get(ctx->net, ctx->afi->family);
if (err < 0)
return err;
......@@ -583,7 +550,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
if (err < 0)
goto err1;
err = nft_ct_netns_get(ctx->net, ctx->afi->family);
err = nf_ct_netns_get(ctx->net, ctx->afi->family);
if (err < 0)
goto err1;
......@@ -606,7 +573,7 @@ static void nft_ct_set_destroy(const struct nft_ctx *ctx,
struct nft_ct *priv = nft_expr_priv(expr);
__nft_ct_set_destroy(ctx, priv);
nft_ct_netns_put(ctx->net, ctx->afi->family);
nf_ct_netns_put(ctx->net, ctx->afi->family);
}
static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
......
......@@ -106,6 +106,23 @@ nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
return NULL;
}
static void *nft_bitmap_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
const struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
struct nft_bitmap_elem *be;
list_for_each_entry_rcu(be, &priv->list, head) {
if (memcmp(nft_set_ext_key(&be->ext), elem->key.val.data, set->klen) ||
!nft_set_elem_active(&be->ext, genmask))
continue;
return be;
}
return ERR_PTR(-ENOENT);
}
static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext)
......@@ -294,6 +311,7 @@ static struct nft_set_ops nft_bitmap_ops __read_mostly = {
.activate = nft_bitmap_activate,
.lookup = nft_bitmap_lookup,
.walk = nft_bitmap_walk,
.get = nft_bitmap_get,
};
static struct nft_set_type nft_bitmap_type __read_mostly = {
......
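As an aside to the set-backend hunks (not part of the patch): the new ->get() callbacks either return the matching element or ERR_PTR(-ENOENT), using the usual kernel convention of encoding a small negative errno in the pointer itself so the caller can tell "found" from "why it failed" without an extra out-parameter. A simplified, self-contained re-implementation of that convention (not the kernel's linux/err.h):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a small negative errno in a pointer, as linux/err.h does. */
static inline void *err_ptr(long error) { return (void *)error; }
static inline long ptr_err(const void *ptr) { return (long)ptr; }
static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-4095;
}

static int element = 42;

/* Toy lookup: key 1 exists, everything else does not. */
static void *set_get(int key)
{
	if (key == 1)
		return &element;	/* found: a plain pointer */
	return err_ptr(-ENOENT);	/* not found: error encoded in the pointer */
}

int main(void)
{
	void *e = set_get(2);

	if (is_err(e))
		printf("lookup failed: %ld\n", ptr_err(e));
	else
		printf("found %d\n", *(int *)e);
	return 0;
}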
......@@ -95,6 +95,24 @@ static bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
return !!he;
}
static void *nft_rhash_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he;
struct nft_rhash_cmp_arg arg = {
.genmask = nft_genmask_cur(net),
.set = set,
.key = elem->key.val.data,
};
he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
return he;
return ERR_PTR(-ENOENT);
}
static bool nft_rhash_update(struct nft_set *set, const u32 *key,
void *(*new)(struct nft_set *,
const struct nft_expr *,
......@@ -409,6 +427,24 @@ static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
return false;
}
static void *nft_hash_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
struct nft_hash_elem *he;
u32 hash;
hash = jhash(elem->key.val.data, set->klen, priv->seed);
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&he->ext), elem->key.val.data, set->klen) &&
nft_set_elem_active(&he->ext, genmask))
return he;
}
return ERR_PTR(-ENOENT);
}
/* nft_hash_select_ops() makes sure key size can be either 2 or 4 bytes. */
static inline u32 nft_hash_key(const u32 *key, u32 klen)
{
......@@ -494,7 +530,7 @@ static void *nft_hash_deactivate(const struct net *net,
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&this->ext), &elem->key.val,
set->klen) ||
set->klen) &&
nft_set_elem_active(&he->ext, genmask)) {
nft_set_elem_change_active(net, set, &he->ext);
return he;
......@@ -600,6 +636,7 @@ static struct nft_set_ops nft_rhash_ops __read_mostly = {
.lookup = nft_rhash_lookup,
.update = nft_rhash_update,
.walk = nft_rhash_walk,
.get = nft_rhash_get,
.features = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
};
......@@ -617,6 +654,7 @@ static struct nft_set_ops nft_hash_ops __read_mostly = {
.remove = nft_hash_remove,
.lookup = nft_hash_lookup,
.walk = nft_hash_walk,
.get = nft_hash_get,
.features = NFT_SET_MAP | NFT_SET_OBJECT,
};
......@@ -634,6 +672,7 @@ static struct nft_set_ops nft_hash_fast_ops __read_mostly = {
.remove = nft_hash_remove,
.lookup = nft_hash_lookup_fast,
.walk = nft_hash_walk,
.get = nft_hash_get,
.features = NFT_SET_MAP | NFT_SET_OBJECT,
};
......
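As an aside to the hash-set hunks above (not part of the patch): nft_hash_get() reuses the table's bucket selection, jhash() over the key followed by reciprocal_scale() to map the full 32-bit hash onto the bucket count without a modulo. The scaling step is a multiply-and-shift; a stand-alone sketch with a fixed value standing in for the jhash() result:

#include <stdint.h>
#include <stdio.h>

/* Map a full-range 32-bit hash onto [0, buckets) without a division. */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t buckets)
{
	return (uint32_t)(((uint64_t)hash * buckets) >> 32);
}

int main(void)
{
	uint32_t buckets = 1024;
	uint32_t hash = 0xdeadbeef;	/* pretend this came from jhash() */

	printf("bucket %u of %u\n", (unsigned)reciprocal_scale(hash, buckets),
	       (unsigned)buckets);
	return 0;
}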
......@@ -113,6 +113,78 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
return ret;
}
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
const u32 *key, struct nft_rbtree_elem **elem,
unsigned int seq, unsigned int flags, u8 genmask)
{
struct nft_rbtree_elem *rbe, *interval = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent;
const void *this;
int d;
parent = rcu_dereference_raw(priv->root.rb_node);
while (parent != NULL) {
if (read_seqcount_retry(&priv->count, seq))
return false;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
this = nft_set_ext_key(&rbe->ext);
d = memcmp(this, key, set->klen);
if (d < 0) {
parent = rcu_dereference_raw(parent->rb_left);
interval = rbe;
} else if (d > 0) {
parent = rcu_dereference_raw(parent->rb_right);
} else {
if (!nft_set_elem_active(&rbe->ext, genmask))
parent = rcu_dereference_raw(parent->rb_left);
if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
(flags & NFT_SET_ELEM_INTERVAL_END)) {
*elem = rbe;
return true;
}
return false;
}
}
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
!nft_rbtree_interval_end(interval)) {
*elem = interval;
return true;
}
return false;
}
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
struct nft_rbtree *priv = nft_set_priv(set);
unsigned int seq = read_seqcount_begin(&priv->count);
struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
const u32 *key = (const u32 *)&elem->key.val;
u8 genmask = nft_genmask_cur(net);
bool ret;
ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
if (ret || !read_seqcount_retry(&priv->count, seq))
return rbe;
read_lock_bh(&priv->lock);
seq = read_seqcount_begin(&priv->count);
ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
if (!ret)
rbe = ERR_PTR(-ENOENT);
read_unlock_bh(&priv->lock);
return rbe;
}
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
......@@ -336,6 +408,7 @@ static struct nft_set_ops nft_rbtree_ops __read_mostly = {
.activate = nft_rbtree_activate,
.lookup = nft_rbtree_lookup,
.walk = nft_rbtree_walk,
.get = nft_rbtree_get,
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT,
};
......
......@@ -1153,6 +1153,7 @@ xt_replace_table(struct xt_table *table,
int *error)
{
struct xt_table_info *private;
unsigned int cpu;
int ret;
ret = xt_jumpstack_alloc(newinfo);
......@@ -1182,14 +1183,28 @@ xt_replace_table(struct xt_table *table,
smp_wmb();
table->private = newinfo;
/* make sure all cpus see new ->private value */
smp_wmb();
/*
* Even though table entries have now been swapped, other CPUs
* may still be using the old entries. This is okay, because
* resynchronization happens because of the locking done
* during the get_counters() routine.
* may still be using the old entries...
*/
local_bh_enable();
/* ... so wait for even xt_recseq on all cpus */
for_each_possible_cpu(cpu) {
seqcount_t *s = &per_cpu(xt_recseq, cpu);
u32 seq = raw_read_seqcount(s);
if (seq & 1) {
do {
cond_resched();
cpu_relax();
} while (seq == raw_read_seqcount(s));
}
}
#ifdef CONFIG_AUDIT
if (audit_enabled) {
audit_log(current->audit_context, GFP_KERNEL,
......
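As an aside to the hunk above (not part of the patch): xt_replace_table() now synchronizes by itself. After publishing the new table it loops over all possible CPUs and, wherever xt_recseq was sampled at an odd value (a packet was mid-traversal), waits until that counter moves on; once the loop completes, no CPU can still be walking the old entries, which is what later lets get_counters() read the old counters without the seqlock. The waiting idea in isolation, as a hedged user-space sketch with plain atomics standing in for the per-CPU seqcount:

#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

/* Stand-in for per-CPU xt_recseq: odd while a "CPU" is inside a packet
 * traversal, even otherwise. All zero here, i.e. no reader is active. */
static atomic_uint recseq[NCPUS];

static void wait_for_readers(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		unsigned int seq = atomic_load(&recseq[cpu]);

		if (seq & 1) {
			/* Spin until this CPU leaves the traversal it was in
			 * when the counter was sampled; the kernel loop also
			 * calls cond_resched() and cpu_relax() here. */
			while (atomic_load(&recseq[cpu]) == seq)
				;
		}
	}
}

int main(void)
{
	wait_for_readers();
	puts("all readers drained, old table can be torn down");
	return 0;
}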
......@@ -46,7 +46,6 @@
struct xt_connlimit_conn {
struct hlist_node node;
struct nf_conntrack_tuple tuple;
union nf_inet_addr addr;
};
struct xt_connlimit_rb {
......@@ -72,16 +71,9 @@ static inline unsigned int connlimit_iphash(__be32 addr)
}
static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
const union nf_inet_addr *mask)
connlimit_iphash6(const union nf_inet_addr *addr)
{
union nf_inet_addr res;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
res.ip6[i] = addr->ip6[i] & mask->ip6[i];
return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6),
return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6),
connlimit_rnd) % CONNLIMIT_SLOTS;
}
......@@ -95,24 +87,13 @@ static inline bool already_closed(const struct nf_conn *conn)
}
static int
same_source_net(const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
const union nf_inet_addr *u3, u_int8_t family)
same_source(const union nf_inet_addr *addr,
const union nf_inet_addr *u3, u_int8_t family)
{
if (family == NFPROTO_IPV4) {
return ntohl(addr->ip & mask->ip) -
ntohl(u3->ip & mask->ip);
} else {
union nf_inet_addr lh, rh;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) {
lh.ip6[i] = addr->ip6[i] & mask->ip6[i];
rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
}
if (family == NFPROTO_IPV4)
return ntohl(addr->ip) - ntohl(u3->ip);
return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6));
}
return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6));
}
static bool add_hlist(struct hlist_head *head,
......@@ -125,7 +106,6 @@ static bool add_hlist(struct hlist_head *head,
if (conn == NULL)
return false;
conn->tuple = *tuple;
conn->addr = *addr;
hlist_add_head(&conn->node, head);
return true;
}
......@@ -196,7 +176,7 @@ static void tree_nodes_free(struct rb_root *root,
static unsigned int
count_tree(struct net *net, struct rb_root *root,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr, const union nf_inet_addr *mask,
const union nf_inet_addr *addr,
u8 family, const struct nf_conntrack_zone *zone)
{
struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
......@@ -217,7 +197,7 @@ count_tree(struct net *net, struct rb_root *root,
rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);
parent = *rbnode;
diff = same_source_net(addr, mask, &rbconn->addr, family);
diff = same_source(addr, &rbconn->addr, family);
if (diff < 0) {
rbnode = &((*rbnode)->rb_left);
} else if (diff > 0) {
......@@ -270,7 +250,6 @@ count_tree(struct net *net, struct rb_root *root,
}
conn->tuple = *tuple;
conn->addr = *addr;
rbconn->addr = *addr;
INIT_HLIST_HEAD(&rbconn->hhead);
......@@ -285,7 +264,6 @@ static int count_them(struct net *net,
struct xt_connlimit_data *data,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
u_int8_t family,
const struct nf_conntrack_zone *zone)
{
......@@ -294,14 +272,14 @@ static int count_them(struct net *net,
u32 hash;
if (family == NFPROTO_IPV6)
hash = connlimit_iphash6(addr, mask);
hash = connlimit_iphash6(addr);
else
hash = connlimit_iphash(addr->ip & mask->ip);
hash = connlimit_iphash(addr->ip);
root = &data->climit_root[hash];
spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
count = count_tree(net, root, tuple, addr, mask, family, zone);
count = count_tree(net, root, tuple, addr, family, zone);
spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
......@@ -332,16 +310,23 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (xt_family(par) == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
unsigned int i;
memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
&iph->daddr : &iph->saddr, sizeof(addr.ip6));
for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
addr.ip6[i] &= info->mask.ip6[i];
} else {
const struct iphdr *iph = ip_hdr(skb);
addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
iph->daddr : iph->saddr;
addr.ip &= info->mask.ip;
}
connections = count_them(net, info->data, tuple_ptr, &addr,
&info->mask, xt_family(par), zone);
xt_family(par), zone);
if (connections == 0)
/* kmalloc failed, drop it entirely */
goto hotdrop;
......
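As an aside to the xt_connlimit hunks above (not part of the patch): because connlimit_mt() now applies the configured mask right after extracting the source or destination address, every address that reaches the hash and tree code is already masked, which is why connlimit_iphash6(), same_source() and the stored nodes lose their mask handling. A small stand-alone sketch of that precompute step for IPv6, using four 32-bit words as in union nf_inet_addr (byte order ignored for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Apply the configured prefix mask once, before hashing or comparing. */
static void mask_ipv6(uint32_t addr[4], const uint32_t mask[4])
{
	for (int i = 0; i < 4; i++)
		addr[i] &= mask[i];
}

int main(void)
{
	uint32_t addr[4] = { 0x20010db8, 0x12345678, 0x9abcdef0, 0x00000001 };
	uint32_t mask[4] = { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 };	/* /64 */

	mask_ipv6(addr, mask);
	printf("%08x %08x %08x %08x\n", (unsigned)addr[0], (unsigned)addr[1],
	       (unsigned)addr[2], (unsigned)addr[3]);
	return 0;
}

Comparing and hashing pre-masked addresses is equivalent to masking both sides at compare time, but the work happens once per packet instead of once per tree node visited.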