Commit ada6c1de authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

This is a bit large (and late) patchset that contains Netfilter updates for
net-next. Most relevant are the br_netfilter fixes, ipset RCU support, removal
of the x_tables percpu ruleset copy and the rework of the nf_tables netdev
support. More specifically, they are:

1) Warn the user when there is a better protocol conntracker available, from
   Marcelo Ricardo Leitner.

2) Fix forwarding of IPv6 fragmented traffic in br_netfilter, from Bernhard
   Thaler. This comes with several patches that prepare for the change.

3) Get rid of the special mtu handling of PPPoE/VLAN frames for br_netfilter.
   This is not needed anymore since we now use the largest fragment size when
   refragmenting, from Florian Westphal.

4) Restore vlan tag when refragmenting in br_netfilter, also from Florian.

5) Get rid of the percpu ruleset copy in x_tables, from Florian, plus a
   follow-up patch from Eric Dumazet that refines it.

6) Several ipset cleanups, fixes and finally RCU support, from Jozsef Kadlecsik.

7) Get rid of parens in Netfilter Kconfig files.

8) Attach the net_device to the basechain, as opposed to the initial
   per-table approach, in the nf_tables netdev family.

9) Subscribe to netdev events to detect the removal and registration of a
   device that is referenced by a basechain (a sketch of such a notifier
   follows the summary below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 758f0d4b 835b8033
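
Regarding 9) above: the nf_tables netdev notifier code itself is not visible
in this view, so what follows is only a hedged sketch of the described logic,
built from the symbols the visible hunks add (nft_register_basechain(),
nft_unregister_basechain(), NFT_BASECHAIN_DISABLED, dev_name). The function
name and the caller-supplied basechain are hypothetical:

/* Hypothetical sketch (not the merged code): park a basechain when the
 * device it is bound to goes away, revive it when the device returns. */
static void nft_netdev_event_sketch(unsigned long event,
                                    struct net_device *dev,
                                    struct nft_base_chain *basechain)
{
        if (strcmp(basechain->dev_name, dev->name) != 0)
                return;

        switch (event) {
        case NETDEV_UNREGISTER:
                /* device went away: detach the hooks, keep the chain */
                nft_unregister_basechain(basechain, 1);
                basechain->flags |= NFT_BASECHAIN_DISABLED;
                break;
        case NETDEV_REGISTER:
                /* device re-appeared: re-attach and enable again */
                if (nft_register_basechain(basechain, 1) == 0)
                        basechain->flags &= ~NFT_BASECHAIN_DISABLED;
                break;
        }
}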
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@ struct ip_set_counter {
 	atomic64_t packets;
 };
 
+struct ip_set_comment_rcu {
+	struct rcu_head rcu;
+	char str[0];
+};
+
 struct ip_set_comment {
-	char *str;
+	struct ip_set_comment_rcu __rcu *c;
 };
 
 struct ip_set_skbinfo {
@@ -176,6 +181,9 @@ struct ip_set_type_variant {
 	/* List elements */
 	int (*list)(const struct ip_set *set, struct sk_buff *skb,
 		    struct netlink_callback *cb);
+	/* Keep listing private when resizing runs parallel */
+	void (*uref)(struct ip_set *set, struct netlink_callback *cb,
+		     bool start);
 
 	/* Return true if "b" set is the same as "a"
 	 * according to the create set parameters */
@@ -223,7 +231,7 @@ struct ip_set {
 	/* The name of the set */
 	char name[IPSET_MAXNAMELEN];
 	/* Lock protecting the set data */
-	rwlock_t lock;
+	spinlock_t lock;
 	/* References to the set */
 	u32 ref;
 	/* The core set type */
@@ -346,7 +354,6 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
 	       (skbinfo->skbqueue &&
 	        nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
 			      cpu_to_be16(skbinfo->skbqueue)));
-
 }
 
 static inline void
@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,
 
 /* Netlink CB args */
 enum {
-	IPSET_CB_NET = 0,
-	IPSET_CB_DUMP,
-	IPSET_CB_INDEX,
-	IPSET_CB_ARG0,
+	IPSET_CB_NET = 0,	/* net namespace */
+	IPSET_CB_DUMP,		/* dump single set/all sets */
+	IPSET_CB_INDEX,		/* set index */
+	IPSET_CB_PRIVATE,	/* set private data */
+	IPSET_CB_ARG0,		/* type specific */
 	IPSET_CB_ARG1,
-	IPSET_CB_ARG2,
 };
 
 /* register and unregister set references */
@@ -545,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX,	\
   .timeout = (set)->timeout }
 
-#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
-
 #define IPSET_CONCAT(a, b)	a##b
 #define IPSET_TOKEN(a, b)	IPSET_CONCAT(a, b)
...
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
 	return nla_data(tb);
 }
 
+/* Called from uadd only, protected by the set spinlock.
+ * The kadt functions don't use the comment extensions in any way.
+ */
 static inline void
 ip_set_init_comment(struct ip_set_comment *comment,
 		    const struct ip_set_ext *ext)
 {
+	struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
 	size_t len = ext->comment ? strlen(ext->comment) : 0;
 
-	if (unlikely(comment->str)) {
-		kfree(comment->str);
-		comment->str = NULL;
+	if (unlikely(c)) {
+		kfree_rcu(c, rcu);
+		rcu_assign_pointer(comment->c, NULL);
 	}
 	if (!len)
 		return;
 	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
 		len = IPSET_MAX_COMMENT_SIZE;
-	comment->str = kzalloc(len + 1, GFP_ATOMIC);
-	if (unlikely(!comment->str))
+	c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+	if (unlikely(!c))
 		return;
-	strlcpy(comment->str, ext->comment, len + 1);
+	strlcpy(c->str, ext->comment, len + 1);
+	rcu_assign_pointer(comment->c, c);
 }
 
+/* Used only when dumping a set, protected by rcu_read_lock_bh() */
 static inline int
 ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
 {
-	if (!comment->str)
+	struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+
+	if (!c)
 		return 0;
-	return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
 }
 
+/* Called from uadd/udel, flush or the garbage collectors protected
+ * by the set spinlock.
+ * Called when the set is destroyed and when there can't be any user
+ * of the set data anymore.
+ */
 static inline void
 ip_set_comment_free(struct ip_set_comment *comment)
 {
-	if (unlikely(!comment->str))
+	struct ip_set_comment_rcu *c;
+
+	c = rcu_dereference_protected(comment->c, 1);
+	if (unlikely(!c))
 		return;
-	kfree(comment->str);
-	comment->str = NULL;
+	kfree_rcu(c, rcu);
+	rcu_assign_pointer(comment->c, NULL);
 }
 
 #endif
...
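
For orientation, a minimal sketch of the locking pairing the comments in the
hunk above describe; the surrounding functions and the "set", "ext" arguments
are illustrative only:

/* update path (uadd): the set spinlock serializes writers, so the old
 * string can be handed to kfree_rcu() while dumpers still read it */
static void example_comment_update(struct ip_set *set,
                                   struct ip_set_comment *comment,
                                   const struct ip_set_ext *ext)
{
        spin_lock_bh(&set->lock);
        ip_set_init_comment(comment, ext);
        spin_unlock_bh(&set->lock);
}

/* dump path: readers only take the BH read-side critical section */
static int example_comment_dump(struct sk_buff *skb,
                                struct ip_set_comment *comment)
{
        int ret;

        rcu_read_lock_bh();
        ret = ip_set_put_comment(skb, comment);
        rcu_read_unlock_bh();
        return ret;
}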
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
 }
 
 static inline bool
-ip_set_timeout_test(unsigned long timeout)
+ip_set_timeout_expired(unsigned long *t)
 {
-	return timeout == IPSET_ELEM_PERMANENT ||
-	       time_is_after_jiffies(timeout);
-}
-
-static inline bool
-ip_set_timeout_expired(unsigned long *timeout)
-{
-	return *timeout != IPSET_ELEM_PERMANENT &&
-	       time_is_before_jiffies(*timeout);
+	return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
 }
 
 static inline void
-ip_set_timeout_set(unsigned long *timeout, u32 t)
+ip_set_timeout_set(unsigned long *timeout, u32 value)
 {
-	if (!t) {
+	unsigned long t;
+
+	if (!value) {
 		*timeout = IPSET_ELEM_PERMANENT;
 		return;
 	}
 
-	*timeout = msecs_to_jiffies(t * 1000) + jiffies;
-	if (*timeout == IPSET_ELEM_PERMANENT)
+	t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
+	if (t == IPSET_ELEM_PERMANENT)
 		/* Bingo! :-) */
-		(*timeout)--;
+		t--;
+	*timeout = t;
 }
 
 static inline u32
 ip_set_timeout_get(unsigned long *timeout)
 {
 	return *timeout == IPSET_ELEM_PERMANENT ? 0 :
-		jiffies_to_msecs(*timeout - jiffies)/1000;
+		jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
 }
 
 #endif	/* __KERNEL__ */
...
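
A short usage sketch of the reworked helpers (values are in seconds;
IPSET_ELEM_PERMANENT is the reserved jiffies value that ip_set_timeout_set()
steps around with the "Bingo" decrement):

unsigned long t;

ip_set_timeout_set(&t, 3600);           /* expires one hour from now */
pr_debug("seconds left: %u\n", ip_set_timeout_get(&t));
if (ip_set_timeout_expired(&t))
        /* element may be garbage collected */ ;
ip_set_timeout_set(&t, 0);              /* 0 means: never expires */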
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -224,13 +224,10 @@ struct xt_table_info {
 	unsigned int stacksize;
 	unsigned int __percpu *stackptr;
 	void ***jumpstack;
-	/* ipt_entry tables: one per CPU */
-	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
-	void *entries[1];
+
+	unsigned char entries[0] __aligned(8);
 };
 
-#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
-			  + nr_cpu_ids * sizeof(char *))
 int xt_register_target(struct xt_target *target);
 void xt_unregister_target(struct xt_target *target);
 int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -353,6 +350,55 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
 	return ret;
 }
 
+
+/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
+ * real (percpu) counter.  On !SMP, its just the packet count,
+ * so nothing needs to be done there.
+ *
+ * xt_percpu_counter_alloc returns the address of the percpu
+ * counter, or 0 on !SMP.
+ *
+ * Hence caller must use IS_ERR_VALUE to check for error, this
+ * allows us to return 0 for single core systems without forcing
+ * callers to deal with SMP vs. NONSMP issues.
+ */
+static inline u64 xt_percpu_counter_alloc(void)
+{
+	if (nr_cpu_ids > 1) {
+		void __percpu *res = alloc_percpu(struct xt_counters);
+
+		if (res == NULL)
+			return (u64) -ENOMEM;
+
+		return (__force u64) res;
+	}
+
+	return 0;
+}
+
+static inline void xt_percpu_counter_free(u64 pcnt)
+{
+	if (nr_cpu_ids > 1)
+		free_percpu((void __percpu *) pcnt);
+}
+
+static inline struct xt_counters *
+xt_get_this_cpu_counter(struct xt_counters *cnt)
+{
+	if (nr_cpu_ids > 1)
+		return this_cpu_ptr((void __percpu *) cnt->pcnt);
+
+	return cnt;
+}
+
+static inline struct xt_counters *
+xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+{
+	if (nr_cpu_ids > 1)
+		return per_cpu_ptr((void __percpu *) cnt->pcnt, cpu);
+
+	return cnt;
+}
+
 struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
 void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
...
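
The call pattern these helpers imply, condensed from the arp_tables/ip_tables
hunks further down (a sketch of the life cycle, not a complete rule path; the
example_* wrappers are hypothetical):

/* rule insertion: attach a percpu counter pair to the entry */
static int example_rule_ctor(struct ipt_entry *e)
{
        e->counters.pcnt = xt_percpu_counter_alloc();
        if (IS_ERR_VALUE(e->counters.pcnt))
                return -ENOMEM;
        return 0;
}

/* packet hot path: xt_write_recseq_begin() already ran, so preemption
 * is off and this_cpu_ptr() is safe */
static void example_hot_path(struct ipt_entry *e, struct sk_buff *skb)
{
        struct xt_counters *counter = xt_get_this_cpu_counter(&e->counters);

        ADD_COUNTER(*counter, skb->len, 1);
}

/* rule removal: release the percpu pair again */
static void example_rule_dtor(struct ipt_entry *e)
{
        xt_percpu_counter_free(e->counters.pcnt);
}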
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -20,13 +20,6 @@ enum nf_br_hook_priorities {
 #define BRNF_BRIDGED_DNAT		0x02
 #define BRNF_NF_BRIDGE_PREROUTING	0x08
 
-static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
-{
-	if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
-		return PPPOE_SES_HLEN;
-	return 0;
-}
-
 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
...
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,6 +25,9 @@ void ipv6_netfilter_fini(void);
 struct nf_ipv6_ops {
 	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
 			const struct net_device *dev, int strict);
+	void (*route_input)(struct sk_buff *skb);
+	int (*fragment)(struct sock *sk, struct sk_buff *skb,
+			int (*output)(struct sock *, struct sk_buff *));
 };
 
 extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
...
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -36,6 +36,7 @@
 #include <linux/sched.h>
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
+#include <linux/in6.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -173,13 +174,17 @@ struct nf_bridge_info {
 		BRNF_PROTO_PPPOE
 	} orig_proto:8;
 	bool			pkt_otherhost;
+	__u16			frag_max_size;
 	unsigned int		mask;
 	struct net_device	*physindev;
 	union {
 		struct net_device *physoutdev;
 		char neigh_header[8];
 	};
-	__be32			ipv4_daddr;
+	union {
+		__be32          ipv4_daddr;
+		struct in6_addr ipv6_daddr;
+	};
 };
 #endif
...
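
The new frag_max_size field carries the defrag-time observation to the
refragmentation point. Roughly (a sketch of the idea behind points 2)-4) in
the summary, not the literal br_netfilter code):

/* bridge PRE_ROUTING, after defragmentation: remember how large the
 * original fragments were */
nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

/* bridge POST_ROUTING, before refragmenting: never grow beyond that
 * size, which is what lets the PPPoE/VLAN mtu special-casing go away */
if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
        mtu = nf_bridge->frag_max_size;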
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -781,6 +781,7 @@ struct nft_stats {
 };
 
 #define NFT_HOOK_OPS_MAX		2
+#define NFT_BASECHAIN_DISABLED		(1 << 0)
 
 /**
  *	struct nft_base_chain - nf_tables base chain
@@ -791,14 +792,17 @@ struct nft_stats {
  *	@policy: default policy
  *	@stats: per-cpu chain stats
  *	@chain: the chain
+ *	@dev_name: device name that this base chain is attached to (if any)
  */
 struct nft_base_chain {
 	struct nf_hook_ops		ops[NFT_HOOK_OPS_MAX];
 	possible_net_t			pnet;
 	const struct nf_chain_type	*type;
 	u8				policy;
+	u8				flags;
 	struct nft_stats __percpu	*stats;
 	struct nft_chain		chain;
+	char				dev_name[IFNAMSIZ];
 };
 
 static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -806,6 +810,11 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
 	return container_of(chain, struct nft_base_chain, chain);
 }
 
+int nft_register_basechain(struct nft_base_chain *basechain,
+			   unsigned int hook_nops);
+void nft_unregister_basechain(struct nft_base_chain *basechain,
+			      unsigned int hook_nops);
+
 unsigned int nft_do_chain(struct nft_pktinfo *pkt,
 			  const struct nf_hook_ops *ops);
 
@@ -819,7 +828,6 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt,
  *	@use: number of chain references to this table
  *	@flags: table flag (see enum nft_table_flags)
  *	@name: name of the table
- *	@dev: this table is bound to this device (if any)
  */
 struct nft_table {
 	struct list_head		list;
@@ -829,7 +837,6 @@ struct nft_table {
 	u32				use;
 	u16				flags;
 	char				name[NFT_TABLE_MAXNAMELEN];
-	struct net_device		*dev;
 };
 
 enum nft_af_flags {
...
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -15,12 +15,12 @@
 /* The protocol version */
 #define IPSET_PROTOCOL		6
 
-/* The maximum permissible comment length we will accept over netlink */
-#define IPSET_MAX_COMMENT_SIZE	255
-
 /* The max length of strings including NUL: set and type identifiers */
 #define IPSET_MAXNAMELEN	32
 
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE	255
+
 /* Message types and commands */
 enum ipset_cmd {
 	IPSET_CMD_NONE,
...
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -122,11 +122,13 @@ enum nft_list_attributes {
  *
  * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
  * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
  */
 enum nft_hook_attributes {
 	NFTA_HOOK_UNSPEC,
 	NFTA_HOOK_HOOKNUM,
 	NFTA_HOOK_PRIORITY,
+	NFTA_HOOK_DEV,
 	__NFTA_HOOK_MAX
 };
 #define NFTA_HOOK_MAX		(__NFTA_HOOK_MAX - 1)
@@ -146,14 +148,12 @@ enum nft_table_flags {
  * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
  * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
  * @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
- * @NFTA_TABLE_DEV: net device name (NLA_STRING)
 */
 enum nft_table_attributes {
 	NFTA_TABLE_UNSPEC,
 	NFTA_TABLE_NAME,
 	NFTA_TABLE_FLAGS,
 	NFTA_TABLE_USE,
-	NFTA_TABLE_DEV,
 	__NFTA_TABLE_MAX
 };
 #define NFTA_TABLE_MAX		(__NFTA_TABLE_MAX - 1)
...
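
From userspace, the moved attribute means the device is now expressed inside
the chain's hook nest rather than on the table. A sketch assuming libmnl and
an NFT_MSG_NEWCHAIN message header already prepared in nlh:

struct nlattr *nest;

nest = mnl_attr_nest_start(nlh, NFTA_CHAIN_HOOK);
mnl_attr_put_u32(nlh, NFTA_HOOK_HOOKNUM, htonl(NF_NETDEV_INGRESS));
mnl_attr_put_u32(nlh, NFTA_HOOK_PRIORITY, htonl(0));
mnl_attr_put_strz(nlh, NFTA_HOOK_DEV, "eth0");  /* the new attribute */
mnl_attr_nest_end(nlh, nest);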
[one file's diff is collapsed in this view and not shown]
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -18,6 +18,7 @@
 #include <linux/netpoll.h>
 #include <linux/u64_stats_sync.h>
 #include <net/route.h>
+#include <net/ip6_fib.h>
 #include <linux/if_vlan.h>
 
 #define BR_HASH_BITS 8
@@ -214,7 +215,10 @@ struct net_bridge
 	spinlock_t			hash_lock;
 	struct hlist_head		hash[BR_HASH_SIZE];
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	struct rtable			fake_rtable;
+	union {
+		struct rtable		fake_rtable;
+		struct rt6_info		fake_rt6_info;
+	};
 	bool				nf_call_iptables;
 	bool				nf_call_ip6tables;
 	bool				nf_call_arptables;
@@ -304,7 +308,6 @@ struct br_input_skb_cb {
 	int mrouters_only;
 #endif
 
-	u16 frag_max_size;
 	bool proxyarp_replied;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
...
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -549,10 +549,6 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
 	hlen = iph->ihl * 4;
 	mtu = mtu - hlen;	/* Size of data space */
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	if (skb->nf_bridge)
-		mtu -= nf_bridge_mtu_reduction(skb);
-#endif
 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
 
 	/* When frag_list is given, use it. First, check its validity:
...
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -195,7 +195,8 @@ config IP_NF_MATCH_ECN
 config IP_NF_MATCH_RPFILTER
 	tristate '"rpfilter" reverse path filter match support'
-	depends on NETFILTER_ADVANCED && (IP_NF_MANGLE || IP_NF_RAW)
+	depends on NETFILTER_ADVANCED
+	depends on IP_NF_MANGLE || IP_NF_RAW
 	---help---
 	  This option allows you to match packets whose replies would
 	  go out via the interface the packet came in.
...
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -256,7 +256,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	const struct arphdr *arp;
 	struct arpt_entry *e, *back;
 	const char *indev, *outdev;
-	void *table_base;
+	const void *table_base;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
@@ -275,7 +275,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
-	table_base = private->entries[smp_processor_id()];
+	table_base = private->entries;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -289,13 +289,15 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	arp = arp_hdr(skb);
 	do {
 		const struct xt_entry_target *t;
+		struct xt_counters *counter;
 
 		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
 			e = arpt_next_entry(e);
 			continue;
 		}
 
-		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);
 
 		t = arpt_get_target_c(e);
@@ -521,6 +523,10 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
 	if (ret)
 		return ret;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	t = arpt_get_target(e);
 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
 					t->u.user.revision);
@@ -538,6 +544,8 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
 err:
 	module_put(t->u.kernel.target->me);
 out:
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
@@ -614,6 +622,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 	module_put(par.target->me);
+	xt_percpu_counter_free(e->counters.pcnt);
 }
 
 /* Checks and translates the user-supplied table segment (held in
@@ -702,12 +711,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i) {
-		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-			memcpy(newinfo->entries[i], entry0, newinfo->size);
-	}
-
 	return ret;
 }
@@ -722,14 +725,16 @@ static void get_counters(const struct xt_table_info *t,
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
-		xt_entry_foreach(iter, t->entries[cpu], t->size) {
+		xt_entry_foreach(iter, t->entries, t->size) {
+			struct xt_counters *tmp;
 			u64 bcnt, pcnt;
 			unsigned int start;
 
+			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
 			do {
 				start = read_seqcount_begin(s);
-				bcnt = iter->counters.bcnt;
-				pcnt = iter->counters.pcnt;
+				bcnt = tmp->bcnt;
+				pcnt = tmp->pcnt;
 			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
@@ -774,7 +779,7 @@ static int copy_entries_to_user(unsigned int total_size,
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	/* ... then copy entire thing ... */
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
@@ -863,16 +868,16 @@ static int compat_table_info(const struct xt_table_info *info,
 			     struct xt_table_info *newinfo)
 {
 	struct arpt_entry *iter;
-	void *loc_cpu_entry;
+	const void *loc_cpu_entry;
 	int ret;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	/* we dont care about newinfo->entries[] */
+	/* we dont care about newinfo->entries */
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
-	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	loc_cpu_entry = info->entries;
 	xt_compat_init_offsets(NFPROTO_ARP, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1037,7 +1042,7 @@ static int __do_replace(struct net *net, const char *name,
 	get_counters(oldinfo, counters);
 
 	/* Decrease module usage counts and free resource */
-	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
+	loc_cpu_old_entry = oldinfo->entries;
 	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
 		cleanup_entry(iter);
@@ -1084,8 +1089,7 @@ static int do_replace(struct net *net, const void __user *user,
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1115,7 +1119,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 			   unsigned int len, int compat)
 {
-	unsigned int i, curcpu;
+	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1125,7 +1129,6 @@ static int do_add_counters(struct net *net, const void __user *user,
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
-	void *loc_cpu_entry;
 	struct arpt_entry *iter;
 	unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1181,12 +1184,13 @@ static int do_add_counters(struct net *net, const void __user *user,
 	}
 
 	i = 0;
-	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
-	loc_cpu_entry = private->entries[curcpu];
 	addend = xt_write_recseq_begin();
-	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+	xt_entry_foreach(iter, private->entries, private->size) {
+		struct xt_counters *tmp;
+
+		tmp = xt_get_this_cpu_counter(&iter->counters);
+		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
 	xt_write_recseq_end(addend);
@@ -1396,7 +1400,7 @@ static int translate_compat_table(const char *name,
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
-	entry1 = newinfo->entries[raw_smp_processor_id()];
+	entry1 = newinfo->entries;
 	pos = entry1;
 	size = total_size;
 	xt_entry_foreach(iter0, entry0, total_size) {
@@ -1416,9 +1420,17 @@ static int translate_compat_table(const char *name,
 	i = 0;
 	xt_entry_foreach(iter1, entry1, newinfo->size) {
+		iter1->counters.pcnt = xt_percpu_counter_alloc();
+		if (IS_ERR_VALUE(iter1->counters.pcnt)) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		ret = check_target(iter1, name);
-		if (ret != 0)
+		if (ret != 0) {
+			xt_percpu_counter_free(iter1->counters.pcnt);
 			break;
+		}
 		++i;
 		if (strcmp(arpt_get_target(iter1)->u.user.name,
 			   XT_ERROR_TARGET) == 0)
@@ -1448,11 +1460,6 @@ static int translate_compat_table(const char *name,
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1511,8 +1518,7 @@ static int compat_do_replace(struct net *net, void __user *user,
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
 		ret = -EFAULT;
 		goto free_newinfo;
@@ -1609,7 +1615,6 @@ static int compat_copy_entries_to_user(unsigned int total_size,
 	void __user *pos;
 	unsigned int size;
 	int ret = 0;
-	void *loc_cpu_entry;
 	unsigned int i = 0;
 	struct arpt_entry *iter;
@@ -1617,11 +1622,9 @@ static int compat_copy_entries_to_user(unsigned int total_size,
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy on our node/cpu */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	pos = userptr;
 	size = total_size;
-	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+	xt_entry_foreach(iter, private->entries, total_size) {
 		ret = compat_copy_entry_to_user(iter, &pos,
 						&size, counters, i++);
 		if (ret != 0)
@@ -1790,8 +1793,7 @@ struct xt_table *arpt_register_table(struct net *net,
 		goto out;
 	}
 
-	/* choose the copy on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	memcpy(loc_cpu_entry, repl->entries, repl->size);
 
 	ret = translate_table(newinfo, loc_cpu_entry, repl);
@@ -1822,7 +1824,7 @@ void arpt_unregister_table(struct xt_table *table)
 	private = xt_unregister_table(table);
 
 	/* Decrease module usage counts and free resources */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
 		cleanup_entry(iter);
 	if (private->number > private->initial_entries)
...
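
To make the point of these x_tables hunks concrete, a back-of-the-envelope
account of the memory change (the C, R, S figures are illustrative
assumptions, not measurements):

/* before: one full ruleset blob per possible CPU:
 *      C copies * R rules * S bytes/rule
 * after: one shared blob plus one percpu counter pair per rule:
 *      R * S  +  C * R * sizeof(struct xt_counters)   (16 bytes: two u64s)
 * e.g. C = 16 CPUs, R = 1000 rules, S = 150 bytes:
 *      before ~ 2.4 MB of duplicated rules,
 *      after  ~ 150 kB of rules + 256 kB of counters */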
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -254,15 +254,13 @@ static void trace_packet(const struct sk_buff *skb,
 			 const struct xt_table_info *private,
 			 const struct ipt_entry *e)
 {
-	const void *table_base;
 	const struct ipt_entry *root;
 	const char *hookname, *chainname, *comment;
 	const struct ipt_entry *iter;
 	unsigned int rulenum = 0;
 	struct net *net = dev_net(in ? in : out);
 
-	table_base = private->entries[smp_processor_id()];
-	root = get_entry(table_base, private->hook_entry[hook]);
+	root = get_entry(private->entries, private->hook_entry[hook]);
 
 	hookname = chainname = hooknames[hook];
 	comment = comments[NF_IP_TRACE_COMMENT_RULE];
@@ -331,7 +329,7 @@ ipt_do_table(struct sk_buff *skb,
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
-	table_base = private->entries[cpu];
+	table_base = private->entries;
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
@@ -345,6 +343,7 @@ ipt_do_table(struct sk_buff *skb,
 	do {
 		const struct xt_entry_target *t;
 		const struct xt_entry_match *ematch;
+		struct xt_counters *counter;
 
 		IP_NF_ASSERT(e);
 		if (!ip_packet_match(ip, indev, outdev,
@@ -361,7 +360,8 @@ ipt_do_table(struct sk_buff *skb,
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters, skb->len, 1);
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
 
 		t = ipt_get_target(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -665,6 +665,10 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 	if (ret)
 		return ret;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table	= name;
@@ -691,6 +695,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 	ret = check_target(e, net, name);
 	if (ret)
 		goto err;
+
 	return 0;
  err:
 	module_put(t->u.kernel.target->me);
@@ -700,6 +705,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
@@ -784,6 +792,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net)
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 	module_put(par.target->me);
+	xt_percpu_counter_free(e->counters.pcnt);
 }
 
 /* Checks and translates the user-supplied table segment (held in
@@ -866,12 +875,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i) {
-		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-			memcpy(newinfo->entries[i], entry0, newinfo->size);
-	}
-
 	return ret;
 }
@@ -887,14 +890,16 @@ get_counters(const struct xt_table_info *t,
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
-		xt_entry_foreach(iter, t->entries[cpu], t->size) {
+		xt_entry_foreach(iter, t->entries, t->size) {
+			struct xt_counters *tmp;
 			u64 bcnt, pcnt;
 			unsigned int start;
 
+			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
 			do {
 				start = read_seqcount_begin(s);
-				bcnt = iter->counters.bcnt;
-				pcnt = iter->counters.pcnt;
+				bcnt = tmp->bcnt;
+				pcnt = tmp->pcnt;
 			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
@@ -939,11 +944,7 @@ copy_entries_to_user(unsigned int total_size,
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
 		goto free_counters;
@@ -1051,16 +1052,16 @@ static int compat_table_info(const struct xt_table_info *info,
 			     struct xt_table_info *newinfo)
 {
 	struct ipt_entry *iter;
-	void *loc_cpu_entry;
+	const void *loc_cpu_entry;
 	int ret;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	/* we dont care about newinfo->entries[] */
+	/* we dont care about newinfo->entries */
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
-	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	loc_cpu_entry = info->entries;
 	xt_compat_init_offsets(AF_INET, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1181,7 +1182,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct xt_table *t;
 	struct xt_table_info *oldinfo;
 	struct xt_counters *counters;
-	void *loc_cpu_old_entry;
 	struct ipt_entry *iter;
 
 	ret = 0;
@@ -1224,8 +1224,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	get_counters(oldinfo, counters);
 
 	/* Decrease module usage counts and free resource */
-	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
 		cleanup_entry(iter, net);
 
 	xt_free_table_info(oldinfo);
@@ -1271,8 +1270,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1303,7 +1301,7 @@ static int
 do_add_counters(struct net *net, const void __user *user,
 		unsigned int len, int compat)
 {
-	unsigned int i, curcpu;
+	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1313,7 +1311,6 @@ do_add_counters(struct net *net, const void __user *user,
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
-	void *loc_cpu_entry;
 	struct ipt_entry *iter;
 	unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1369,12 +1366,12 @@ do_add_counters(struct net *net, const void __user *user,
 	}
 
 	i = 0;
-	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
-	loc_cpu_entry = private->entries[curcpu];
 	addend = xt_write_recseq_begin();
-	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+	xt_entry_foreach(iter, private->entries, private->size) {
+		struct xt_counters *tmp;
+
+		tmp = xt_get_this_cpu_counter(&iter->counters);
+		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
 	xt_write_recseq_end(addend);
@@ -1608,6 +1605,10 @@ compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
 	unsigned int j;
 	int ret = 0;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table	= name;
@@ -1632,6 +1633,9 @@ compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
@@ -1716,7 +1720,7 @@ translate_compat_table(struct net *net,
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
-	entry1 = newinfo->entries[raw_smp_processor_id()];
+	entry1 = newinfo->entries;
 	pos = entry1;
 	size = total_size;
 	xt_entry_foreach(iter0, entry0, total_size) {
@@ -1768,11 +1772,6 @@ translate_compat_table(struct net *net,
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1819,8 +1818,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1891,7 +1889,6 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
 	void __user *pos;
 	unsigned int size;
 	int ret = 0;
-	const void *loc_cpu_entry;
 	unsigned int i = 0;
 	struct ipt_entry *iter;
@@ -1899,14 +1896,9 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	pos = userptr;
 	size = total_size;
-	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+	xt_entry_foreach(iter, private->entries, total_size) {
 		ret = compat_copy_entry_to_user(iter, &pos,
 						&size, counters, i++);
 		if (ret != 0)
@@ -2081,8 +2073,7 @@ struct xt_table *ipt_register_table(struct net *net,
 		goto out;
 	}
 
-	/* choose the copy on our node/cpu, but dont care about preemption */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	memcpy(loc_cpu_entry, repl->entries, repl->size);
 
 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
@@ -2113,7 +2104,7 @@ void ipt_unregister_table(struct net *net, struct xt_table *table)
 	private = xt_unregister_table(table);
 
 	/* Decrease module usage counts and free resources */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
 		cleanup_entry(iter, net);
 	if (private->number > private->initial_entries)
...
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -191,6 +191,8 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 
 static const struct nf_ipv6_ops ipv6ops = {
 	.chk_addr	= ipv6_chk_addr,
+	.route_input	= ip6_route_input,
+	.fragment	= ip6_fragment
 };
 
 static const struct nf_afinfo nf_ip6_afinfo = {
...
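
Callers that must also build and run without IPv6 reach the two new ops
through the rcu-managed pointer. A minimal sketch using the existing
nf_get_ipv6_ops() accessor from <linux/netfilter_ipv6.h> (the function name
and error code here are illustrative):

static int example_refrag_v6(struct sock *sk, struct sk_buff *skb,
                             int (*output)(struct sock *, struct sk_buff *))
{
        const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();

        if (!v6ops)
                return -EPROTONOSUPPORT;  /* IPv6 support not present */

        return v6ops->fragment(sk, skb, output);
}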
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -186,7 +186,8 @@ config IP6_NF_MATCH_MH
 config IP6_NF_MATCH_RPFILTER
 	tristate '"rpfilter" reverse path filter match support'
-	depends on NETFILTER_ADVANCED && (IP6_NF_MANGLE || IP6_NF_RAW)
+	depends on NETFILTER_ADVANCED
+	depends on IP6_NF_MANGLE || IP6_NF_RAW
 	---help---
 	  This option allows you to match packets whose replies would
 	  go out via the interface the packet came in.
...
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -283,15 +283,13 @@ static void trace_packet(const struct sk_buff *skb,
 			 const struct xt_table_info *private,
 			 const struct ip6t_entry *e)
 {
-	const void *table_base;
 	const struct ip6t_entry *root;
 	const char *hookname, *chainname, *comment;
 	const struct ip6t_entry *iter;
 	unsigned int rulenum = 0;
 	struct net *net = dev_net(in ? in : out);
 
-	table_base = private->entries[smp_processor_id()];
-	root = get_entry(table_base, private->hook_entry[hook]);
+	root = get_entry(private->entries, private->hook_entry[hook]);
 
 	hookname = chainname = hooknames[hook];
 	comment = comments[NF_IP6_TRACE_COMMENT_RULE];
@@ -357,7 +355,7 @@ ip6t_do_table(struct sk_buff *skb,
 	 */
 	smp_read_barrier_depends();
 	cpu        = smp_processor_id();
-	table_base = private->entries[cpu];
+	table_base = private->entries;
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
@@ -367,6 +365,7 @@ ip6t_do_table(struct sk_buff *skb,
 	do {
 		const struct xt_entry_target *t;
 		const struct xt_entry_match *ematch;
+		struct xt_counters *counter;
 
 		IP_NF_ASSERT(e);
 		acpar.thoff = 0;
@@ -384,7 +383,8 @@ ip6t_do_table(struct sk_buff *skb,
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters, skb->len, 1);
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
 
 		t = ip6t_get_target_c(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -679,6 +679,10 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
 	if (ret)
 		return ret;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table	= name;
@@ -714,6 +718,9 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
@@ -797,6 +804,8 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net)
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 	module_put(par.target->me);
+
+	xt_percpu_counter_free(e->counters.pcnt);
 }
@@ -879,12 +888,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i) {
-		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-			memcpy(newinfo->entries[i], entry0, newinfo->size);
-	}
-
 	return ret;
 }
@@ -900,14 +903,16 @@ get_counters(const struct xt_table_info *t,
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
-		xt_entry_foreach(iter, t->entries[cpu], t->size) {
+		xt_entry_foreach(iter, t->entries, t->size) {
+			struct xt_counters *tmp;
 			u64 bcnt, pcnt;
 			unsigned int start;
 
+			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
 			do {
 				start = read_seqcount_begin(s);
-				bcnt = iter->counters.bcnt;
-				pcnt = iter->counters.pcnt;
+				bcnt = tmp->bcnt;
+				pcnt = tmp->pcnt;
 			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
@@ -952,11 +957,7 @@ copy_entries_to_user(unsigned int total_size,
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
 		goto free_counters;
@@ -1064,16 +1065,16 @@ static int compat_table_info(const struct xt_table_info *info,
 			     struct xt_table_info *newinfo)
 {
 	struct ip6t_entry *iter;
-	void *loc_cpu_entry;
+	const void *loc_cpu_entry;
 	int ret;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	/* we dont care about newinfo->entries[] */
+	/* we dont care about newinfo->entries */
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
-	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	loc_cpu_entry = info->entries;
 	xt_compat_init_offsets(AF_INET6, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1194,7 +1195,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct xt_table *t;
 	struct xt_table_info *oldinfo;
 	struct xt_counters *counters;
-	const void *loc_cpu_old_entry;
 	struct ip6t_entry *iter;
 
 	ret = 0;
@@ -1237,8 +1237,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	get_counters(oldinfo, counters);
 
 	/* Decrease module usage counts and free resource */
-	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
 		cleanup_entry(iter, net);
 
 	xt_free_table_info(oldinfo);
@@ -1284,8 +1283,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1316,7 +1314,7 @@ static int
 do_add_counters(struct net *net, const void __user *user, unsigned int len,
 		int compat)
 {
-	unsigned int i, curcpu;
+	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1326,7 +1324,6 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
-	const void *loc_cpu_entry;
 	struct ip6t_entry *iter;
 	unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1374,7 +1371,6 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 		goto free;
 	}
 
 	local_bh_disable();
-
 	private = t->private;
 	if (private->number != num_counters) {
@@ -1383,16 +1379,15 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	}
 
 	i = 0;
-	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
 	addend = xt_write_recseq_begin();
-	loc_cpu_entry = private->entries[curcpu];
+	xt_entry_foreach(iter, private->entries, private->size) {
xt_entry_foreach(iter, loc_cpu_entry, private->size) { struct xt_counters *tmp;
ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
tmp = xt_get_this_cpu_counter(&iter->counters);
ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
++i; ++i;
} }
xt_write_recseq_end(addend); xt_write_recseq_end(addend);
unlock_up_free: unlock_up_free:
local_bh_enable(); local_bh_enable();
xt_table_unlock(t); xt_table_unlock(t);
...@@ -1621,6 +1616,9 @@ static int compat_check_entry(struct ip6t_entry *e, struct net *net, ...@@ -1621,6 +1616,9 @@ static int compat_check_entry(struct ip6t_entry *e, struct net *net,
struct xt_mtchk_param mtpar; struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch; struct xt_entry_match *ematch;
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
j = 0; j = 0;
mtpar.net = net; mtpar.net = net;
mtpar.table = name; mtpar.table = name;
...@@ -1645,6 +1643,9 @@ static int compat_check_entry(struct ip6t_entry *e, struct net *net, ...@@ -1645,6 +1643,9 @@ static int compat_check_entry(struct ip6t_entry *e, struct net *net,
break; break;
cleanup_match(ematch, net); cleanup_match(ematch, net);
} }
xt_percpu_counter_free(e->counters.pcnt);
return ret; return ret;
} }
...@@ -1729,7 +1730,7 @@ translate_compat_table(struct net *net, ...@@ -1729,7 +1730,7 @@ translate_compat_table(struct net *net,
newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->hook_entry[i] = info->hook_entry[i];
newinfo->underflow[i] = info->underflow[i]; newinfo->underflow[i] = info->underflow[i];
} }
entry1 = newinfo->entries[raw_smp_processor_id()]; entry1 = newinfo->entries;
pos = entry1; pos = entry1;
size = total_size; size = total_size;
xt_entry_foreach(iter0, entry0, total_size) { xt_entry_foreach(iter0, entry0, total_size) {
...@@ -1781,11 +1782,6 @@ translate_compat_table(struct net *net, ...@@ -1781,11 +1782,6 @@ translate_compat_table(struct net *net,
return ret; return ret;
} }
/* And one copy for every other CPU */
for_each_possible_cpu(i)
if (newinfo->entries[i] && newinfo->entries[i] != entry1)
memcpy(newinfo->entries[i], entry1, newinfo->size);
*pinfo = newinfo; *pinfo = newinfo;
*pentry0 = entry1; *pentry0 = entry1;
xt_free_table_info(info); xt_free_table_info(info);
...@@ -1832,8 +1828,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) ...@@ -1832,8 +1828,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
if (!newinfo) if (!newinfo)
return -ENOMEM; return -ENOMEM;
/* choose the copy that is on our node/cpu */ loc_cpu_entry = newinfo->entries;
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
tmp.size) != 0) { tmp.size) != 0) {
ret = -EFAULT; ret = -EFAULT;
...@@ -1904,7 +1899,6 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, ...@@ -1904,7 +1899,6 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *pos; void __user *pos;
unsigned int size; unsigned int size;
int ret = 0; int ret = 0;
const void *loc_cpu_entry;
unsigned int i = 0; unsigned int i = 0;
struct ip6t_entry *iter; struct ip6t_entry *iter;
...@@ -1912,14 +1906,9 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, ...@@ -1912,14 +1906,9 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
if (IS_ERR(counters)) if (IS_ERR(counters))
return PTR_ERR(counters); return PTR_ERR(counters);
/* choose the copy that is on our node/cpu, ...
* This choice is lazy (because current thread is
* allowed to migrate to another cpu)
*/
loc_cpu_entry = private->entries[raw_smp_processor_id()];
pos = userptr; pos = userptr;
size = total_size; size = total_size;
xt_entry_foreach(iter, loc_cpu_entry, total_size) { xt_entry_foreach(iter, private->entries, total_size) {
ret = compat_copy_entry_to_user(iter, &pos, ret = compat_copy_entry_to_user(iter, &pos,
&size, counters, i++); &size, counters, i++);
if (ret != 0) if (ret != 0)
...@@ -2094,8 +2083,7 @@ struct xt_table *ip6t_register_table(struct net *net, ...@@ -2094,8 +2083,7 @@ struct xt_table *ip6t_register_table(struct net *net,
goto out; goto out;
} }
/* choose the copy on our node/cpu, but don't care about preemption */ loc_cpu_entry = newinfo->entries;
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
memcpy(loc_cpu_entry, repl->entries, repl->size); memcpy(loc_cpu_entry, repl->entries, repl->size);
ret = translate_table(net, newinfo, loc_cpu_entry, repl); ret = translate_table(net, newinfo, loc_cpu_entry, repl);
...@@ -2125,7 +2113,7 @@ void ip6t_unregister_table(struct net *net, struct xt_table *table) ...@@ -2125,7 +2113,7 @@ void ip6t_unregister_table(struct net *net, struct xt_table *table)
private = xt_unregister_table(table); private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */ /* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries[raw_smp_processor_id()]; loc_cpu_entry = private->entries;
xt_entry_foreach(iter, loc_cpu_entry, private->size) xt_entry_foreach(iter, loc_cpu_entry, private->size)
cleanup_entry(iter, net); cleanup_entry(iter, net);
if (private->number > private->initial_entries) if (private->number > private->initial_entries)
......
...@@ -206,7 +206,7 @@ config NF_CONNTRACK_FTP ...@@ -206,7 +206,7 @@ config NF_CONNTRACK_FTP
config NF_CONNTRACK_H323 config NF_CONNTRACK_H323
tristate "H.323 protocol support" tristate "H.323 protocol support"
depends on (IPV6 || IPV6=n) depends on IPV6 || IPV6=n
depends on NETFILTER_ADVANCED depends on NETFILTER_ADVANCED
help help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most H.323 is a VoIP signalling protocol from ITU-T. As one of the most
...@@ -723,7 +723,7 @@ config NETFILTER_XT_TARGET_HL ...@@ -723,7 +723,7 @@ config NETFILTER_XT_TARGET_HL
config NETFILTER_XT_TARGET_HMARK config NETFILTER_XT_TARGET_HMARK
tristate '"HMARK" target support' tristate '"HMARK" target support'
depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
depends on NETFILTER_ADVANCED depends on NETFILTER_ADVANCED
---help--- ---help---
This option adds the "HMARK" target. This option adds the "HMARK" target.
...@@ -865,7 +865,7 @@ config NETFILTER_XT_TARGET_REDIRECT ...@@ -865,7 +865,7 @@ config NETFILTER_XT_TARGET_REDIRECT
config NETFILTER_XT_TARGET_TEE config NETFILTER_XT_TARGET_TEE
tristate '"TEE" - packet cloning to alternate destination' tristate '"TEE" - packet cloning to alternate destination'
depends on NETFILTER_ADVANCED depends on NETFILTER_ADVANCED
depends on (IPV6 || IPV6=n) depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK depends on !NF_CONNTRACK || NF_CONNTRACK
---help--- ---help---
This option adds a "TEE" target with which a packet can be cloned and This option adds a "TEE" target with which a packet can be cloned and
...@@ -875,8 +875,8 @@ config NETFILTER_XT_TARGET_TPROXY ...@@ -875,8 +875,8 @@ config NETFILTER_XT_TARGET_TPROXY
tristate '"TPROXY" target transparent proxying support' tristate '"TPROXY" target transparent proxying support'
depends on NETFILTER_XTABLES depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED depends on NETFILTER_ADVANCED
depends on (IPV6 || IPV6=n) depends on IPV6 || IPV6=n
depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
depends on IP_NF_MANGLE depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4 select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
...@@ -915,7 +915,7 @@ config NETFILTER_XT_TARGET_SECMARK ...@@ -915,7 +915,7 @@ config NETFILTER_XT_TARGET_SECMARK
config NETFILTER_XT_TARGET_TCPMSS config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support' tristate '"TCPMSS" target support'
depends on (IPV6 || IPV6=n) depends on IPV6 || IPV6=n
default m if NETFILTER_ADVANCED=n default m if NETFILTER_ADVANCED=n
---help--- ---help---
This option adds a `TCPMSS' target, which allows you to alter the This option adds a `TCPMSS' target, which allows you to alter the
...@@ -1127,7 +1127,7 @@ config NETFILTER_XT_MATCH_ESP ...@@ -1127,7 +1127,7 @@ config NETFILTER_XT_MATCH_ESP
config NETFILTER_XT_MATCH_HASHLIMIT config NETFILTER_XT_MATCH_HASHLIMIT
tristate '"hashlimit" match support' tristate '"hashlimit" match support'
depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
depends on NETFILTER_ADVANCED depends on NETFILTER_ADVANCED
help help
This option adds a `hashlimit' match. This option adds a `hashlimit' match.
...@@ -1369,8 +1369,8 @@ config NETFILTER_XT_MATCH_SOCKET ...@@ -1369,8 +1369,8 @@ config NETFILTER_XT_MATCH_SOCKET
depends on NETFILTER_XTABLES depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK depends on !NF_CONNTRACK || NF_CONNTRACK
depends on (IPV6 || IPV6=n) depends on IPV6 || IPV6=n
depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
select NF_DEFRAG_IPV4 select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help help
......
...@@ -41,7 +41,7 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) ...@@ -41,7 +41,7 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
struct mtype *map = set->data; struct mtype *map = set->data;
init_timer(&map->gc); init_timer(&map->gc);
map->gc.data = (unsigned long) set; map->gc.data = (unsigned long)set;
map->gc.function = gc; map->gc.function = gc;
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc); add_timer(&map->gc);
...@@ -144,10 +144,12 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, ...@@ -144,10 +144,12 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (ret == IPSET_ADD_FAILED) { if (ret == IPSET_ADD_FAILED) {
if (SET_WITH_TIMEOUT(set) && if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(x, set))) ip_set_timeout_expired(ext_timeout(x, set))) {
ret = 0; ret = 0;
else if (!(flags & IPSET_FLAG_EXIST)) } else if (!(flags & IPSET_FLAG_EXIST)) {
set_bit(e->id, map->members);
return -IPSET_ERR_EXIST; return -IPSET_ERR_EXIST;
}
/* Element is re-added, cleanup extensions */ /* Element is re-added, cleanup extensions */
ip_set_ext_destroy(set, x); ip_set_ext_destroy(set, x);
} }
...@@ -165,6 +167,10 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, ...@@ -165,6 +167,10 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
ip_set_init_comment(ext_comment(x, set), ext); ip_set_init_comment(ext_comment(x, set), ext);
if (SET_WITH_SKBINFO(set)) if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(x, set), ext); ip_set_init_skbinfo(ext_skbinfo(x, set), ext);
/* Activate element */
set_bit(e->id, map->members);
return 0; return 0;
} }
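
The set_bit() moved to the end of mtype_add() (and the do_add helpers below switched from test_and_set_bit() to plain test_bit()) so an element is published only after its extensions are fully initialized. A small C11 sketch of that init-then-publish ordering, with an acquire/release flag standing in for the member bitmap bit:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct elem {
		char comment[32];	/* "extension" data */
		atomic_bool member;	/* bitmap bit: published flag */
	};

	/* Writer: fill in all extensions first, then publish with a release
	 * store -- the model of doing set_bit() last in mtype_add(). */
	static void add_elem(struct elem *e, const char *comment)
	{
		strncpy(e->comment, comment, sizeof(e->comment) - 1);
		atomic_store_explicit(&e->member, true, memory_order_release);
	}

	/* Reader: an acquire load of the member flag guarantees the
	 * extensions are seen fully initialized. */
	static bool test_elem(const struct elem *e)
	{
		return atomic_load_explicit(&e->member, memory_order_acquire);
	}

	int main(void)
	{
		struct elem e = { .member = false };

		add_elem(&e, "lan hosts");
		if (test_elem(&e))
			printf("member, comment=%s\n", e.comment);
		return 0;
	}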
...@@ -203,10 +209,13 @@ mtype_list(const struct ip_set *set, ...@@ -203,10 +209,13 @@ mtype_list(const struct ip_set *set,
struct nlattr *adt, *nested; struct nlattr *adt, *nested;
void *x; void *x;
u32 id, first = cb->args[IPSET_CB_ARG0]; u32 id, first = cb->args[IPSET_CB_ARG0];
int ret = 0;
adt = ipset_nest_start(skb, IPSET_ATTR_ADT); adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt) if (!adt)
return -EMSGSIZE; return -EMSGSIZE;
/* Extensions may be replaced */
rcu_read_lock();
for (; cb->args[IPSET_CB_ARG0] < map->elements; for (; cb->args[IPSET_CB_ARG0] < map->elements;
cb->args[IPSET_CB_ARG0]++) { cb->args[IPSET_CB_ARG0]++) {
id = cb->args[IPSET_CB_ARG0]; id = cb->args[IPSET_CB_ARG0];
...@@ -214,7 +223,7 @@ mtype_list(const struct ip_set *set, ...@@ -214,7 +223,7 @@ mtype_list(const struct ip_set *set,
if (!test_bit(id, map->members) || if (!test_bit(id, map->members) ||
(SET_WITH_TIMEOUT(set) && (SET_WITH_TIMEOUT(set) &&
#ifdef IP_SET_BITMAP_STORED_TIMEOUT #ifdef IP_SET_BITMAP_STORED_TIMEOUT
mtype_is_filled((const struct mtype_elem *) x) && mtype_is_filled((const struct mtype_elem *)x) &&
#endif #endif
ip_set_timeout_expired(ext_timeout(x, set)))) ip_set_timeout_expired(ext_timeout(x, set))))
continue; continue;
...@@ -222,14 +231,16 @@ mtype_list(const struct ip_set *set, ...@@ -222,14 +231,16 @@ mtype_list(const struct ip_set *set,
if (!nested) { if (!nested) {
if (id == first) { if (id == first) {
nla_nest_cancel(skb, adt); nla_nest_cancel(skb, adt);
return -EMSGSIZE; ret = -EMSGSIZE;
} else goto out;
}
goto nla_put_failure; goto nla_put_failure;
} }
if (mtype_do_list(skb, map, id, set->dsize)) if (mtype_do_list(skb, map, id, set->dsize))
goto nla_put_failure; goto nla_put_failure;
if (ip_set_put_extensions(skb, set, x, if (ip_set_put_extensions(skb, set, x,
mtype_is_filled((const struct mtype_elem *) x))) mtype_is_filled((const struct mtype_elem *)x)))
goto nla_put_failure; goto nla_put_failure;
ipset_nest_end(skb, nested); ipset_nest_end(skb, nested);
} }
...@@ -238,29 +249,32 @@ mtype_list(const struct ip_set *set, ...@@ -238,29 +249,32 @@ mtype_list(const struct ip_set *set,
/* Set listing finished */ /* Set listing finished */
cb->args[IPSET_CB_ARG0] = 0; cb->args[IPSET_CB_ARG0] = 0;
return 0; goto out;
nla_put_failure: nla_put_failure:
nla_nest_cancel(skb, nested); nla_nest_cancel(skb, nested);
if (unlikely(id == first)) { if (unlikely(id == first)) {
cb->args[IPSET_CB_ARG0] = 0; cb->args[IPSET_CB_ARG0] = 0;
return -EMSGSIZE; ret = -EMSGSIZE;
} }
ipset_nest_end(skb, adt); ipset_nest_end(skb, adt);
return 0; out:
rcu_read_unlock();
return ret;
} }
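
Listing now runs under rcu_read_lock() because extensions such as comments may be replaced while the dump is in flight. A hedged userspace sketch of the same read-side pattern, assuming liburcu's classic unprefixed API (build with -lurcu); the kernel side is the rcu_read_lock()/rcu_read_unlock() pair added above:

	/* Build with: gcc -o demo demo.c -lurcu   (classic liburcu flavor) */
	#include <urcu.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char *comment;	/* RCU-protected, like ip_set_comment_rcu->str */

	static void set_comment(const char *str)
	{
		char *new = strdup(str);
		char *old;

		old = rcu_xchg_pointer(&comment, new);
		synchronize_rcu();	/* wait out readers of the old string */
		free(old);
	}

	static void list_comment(void)
	{
		char *c;

		rcu_read_lock();	/* extensions may be replaced in parallel */
		c = rcu_dereference(comment);
		if (c)
			printf("comment: %s\n", c);
		rcu_read_unlock();
	}

	int main(void)
	{
		rcu_register_thread();
		set_comment("first");
		list_comment();
		set_comment("second");
		list_comment();
		rcu_unregister_thread();
		return 0;
	}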
static void static void
mtype_gc(unsigned long ul_set) mtype_gc(unsigned long ul_set)
{ {
struct ip_set *set = (struct ip_set *) ul_set; struct ip_set *set = (struct ip_set *)ul_set;
struct mtype *map = set->data; struct mtype *map = set->data;
void *x; void *x;
u32 id; u32 id;
/* We run in parallel with other readers (test element) /* We run in parallel with other readers (test element)
* but adding/deleting new entries is locked out */ * but adding/deleting new entries is locked out
read_lock_bh(&set->lock); */
spin_lock_bh(&set->lock);
for (id = 0; id < map->elements; id++) for (id = 0; id < map->elements; id++)
if (mtype_gc_test(id, map, set->dsize)) { if (mtype_gc_test(id, map, set->dsize)) {
x = get_ext(set, map, id); x = get_ext(set, map, id);
...@@ -269,7 +283,7 @@ mtype_gc(unsigned long ul_set) ...@@ -269,7 +283,7 @@ mtype_gc(unsigned long ul_set)
ip_set_ext_destroy(set, x); ip_set_ext_destroy(set, x);
} }
} }
read_unlock_bh(&set->lock); spin_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc); add_timer(&map->gc);
......
...@@ -59,7 +59,7 @@ struct bitmap_ip_adt_elem { ...@@ -59,7 +59,7 @@ struct bitmap_ip_adt_elem {
static inline u32 static inline u32
ip_to_id(const struct bitmap_ip *m, u32 ip) ip_to_id(const struct bitmap_ip *m, u32 ip)
{ {
return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts; return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip) / m->hosts;
} }
/* Common functions */ /* Common functions */
...@@ -81,7 +81,7 @@ static inline int ...@@ -81,7 +81,7 @@ static inline int
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map, bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
u32 flags, size_t dsize) u32 flags, size_t dsize)
{ {
return !!test_and_set_bit(e->id, map->members); return !!test_bit(e->id, map->members);
} }
static inline int static inline int
...@@ -138,18 +138,12 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -138,18 +138,12 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext ext = IP_SET_INIT_UEXT(set); struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret) if (ret)
return ret; return ret;
...@@ -181,8 +175,9 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -181,8 +175,9 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
if (!cidr || cidr > HOST_MASK) if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr); ip_set_mask_from_to(ip, ip_to, cidr);
} else } else {
ip_to = ip; ip_to = ip;
}
if (ip_to > map->last_ip) if (ip_to > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
...@@ -193,7 +188,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -193,7 +188,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -284,8 +279,9 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], ...@@ -284,8 +279,9 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (cidr >= HOST_MASK) if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr); ip_set_mask_from_to(first_ip, last_ip, cidr);
} else } else {
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
}
if (tb[IPSET_ATTR_NETMASK]) { if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
...@@ -382,6 +378,7 @@ bitmap_ip_init(void) ...@@ -382,6 +378,7 @@ bitmap_ip_init(void)
static void __exit static void __exit
bitmap_ip_fini(void) bitmap_ip_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&bitmap_ip_type); ip_set_type_unregister(&bitmap_ip_type);
} }
......
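
The rcu_barrier() added to each module-exit path makes sure that call_rcu() callbacks which free set extensions have all run before the module text implementing them can go away. A userspace model of that exit-time rule, again assuming liburcu (names and headers per its classic API; treat this as a sketch, not the kernel code path):

	/* Build with: gcc -o demo demo.c -lurcu   (classic liburcu flavor) */
	#include <urcu.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int value;
		struct rcu_head rcu;
	};

	static void free_node(struct rcu_head *head)
	{
		struct node *n = caa_container_of(head, struct node, rcu);

		printf("freeing node %d\n", n->value);
		free(n);
	}

	int main(void)
	{
		rcu_register_thread();

		struct node *n = malloc(sizeof(*n));
		n->value = 42;

		call_rcu(&n->rcu, free_node);	/* deferred free, as for comments */

		/* The module-exit rule modelled here: wait for all pending
		 * call_rcu() callbacks before the code they run is unloaded. */
		rcu_barrier();

		rcu_unregister_thread();
		return 0;
	}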
...@@ -90,7 +90,7 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e, ...@@ -90,7 +90,7 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
return 0; return 0;
elem = get_elem(map->extensions, e->id, dsize); elem = get_elem(map->extensions, e->id, dsize);
if (elem->filled == MAC_FILLED) if (elem->filled == MAC_FILLED)
return e->ether == NULL || return !e->ether ||
ether_addr_equal(e->ether, elem->ether); ether_addr_equal(e->ether, elem->ether);
/* Trigger kernel to fill out the ethernet address */ /* Trigger kernel to fill out the ethernet address */
return -EAGAIN; return -EAGAIN;
...@@ -131,7 +131,8 @@ bitmap_ipmac_add_timeout(unsigned long *timeout, ...@@ -131,7 +131,8 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
/* If the MAC is still unset, we store the plain timeout value /* If the MAC is still unset, we store the plain timeout value
* because the timer is not activated yet * because the timer is not activated yet
* and we can reuse it later when MAC is filled out, * and we can reuse it later when MAC is filled out,
* possibly by the kernel */ * possibly by the kernel
*/
if (e->ether) if (e->ether)
ip_set_timeout_set(timeout, t); ip_set_timeout_set(timeout, t);
else else
...@@ -147,28 +148,35 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e, ...@@ -147,28 +148,35 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac_elem *elem; struct bitmap_ipmac_elem *elem;
elem = get_elem(map->extensions, e->id, dsize); elem = get_elem(map->extensions, e->id, dsize);
if (test_and_set_bit(e->id, map->members)) { if (test_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) { if (elem->filled == MAC_FILLED) {
if (e->ether && (flags & IPSET_FLAG_EXIST)) if (e->ether &&
memcpy(elem->ether, e->ether, ETH_ALEN); (flags & IPSET_FLAG_EXIST) &&
!ether_addr_equal(e->ether, elem->ether)) {
/* memcpy isn't atomic */
clear_bit(e->id, map->members);
smp_mb__after_atomic();
ether_addr_copy(elem->ether, e->ether);
}
return IPSET_ADD_FAILED; return IPSET_ADD_FAILED;
} else if (!e->ether) } else if (!e->ether)
/* Already added without ethernet address */ /* Already added without ethernet address */
return IPSET_ADD_FAILED; return IPSET_ADD_FAILED;
/* Fill the MAC address and trigger the timer activation */ /* Fill the MAC address and trigger the timer activation */
memcpy(elem->ether, e->ether, ETH_ALEN); clear_bit(e->id, map->members);
smp_mb__after_atomic();
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED; elem->filled = MAC_FILLED;
return IPSET_ADD_START_STORED_TIMEOUT; return IPSET_ADD_START_STORED_TIMEOUT;
} else if (e->ether) { } else if (e->ether) {
/* We can store MAC too */ /* We can store MAC too */
memcpy(elem->ether, e->ether, ETH_ALEN); ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED; elem->filled = MAC_FILLED;
return 0; return 0;
} else { }
elem->filled = MAC_UNSET; elem->filled = MAC_UNSET;
/* MAC is not stored yet, don't start timer */ /* MAC is not stored yet, don't start timer */
return IPSET_ADD_STORE_PLAIN_TIMEOUT; return IPSET_ADD_STORE_PLAIN_TIMEOUT;
}
} }
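
Because ether_addr_copy()/memcpy() is not atomic, the re-add path unpublishes the entry (clear_bit() plus smp_mb__after_atomic()) before overwriting the stored MAC; lock-free readers that race with the copy see the entry as absent rather than a torn address, and the later set_bit() in mtype_add() republishes it. A C11 model of that unpublish-copy-republish pattern, folded into one function here for brevity:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct entry {
		unsigned char ether[6];
		atomic_bool member;
	};

	/* Update a non-atomic payload that lock-free readers may inspect:
	 * unpublish, copy, republish. Readers racing with the copy see the
	 * entry as absent rather than as a half-written MAC address. */
	static void update_mac(struct entry *e, const unsigned char *mac)
	{
		atomic_store_explicit(&e->member, false, memory_order_release);
		/* model of clear_bit() + smp_mb__after_atomic() */
		atomic_thread_fence(memory_order_seq_cst);
		memcpy(e->ether, mac, 6);
		atomic_store_explicit(&e->member, true, memory_order_release);
	}

	static bool lookup(const struct entry *e, unsigned char *mac)
	{
		if (!atomic_load_explicit(&e->member, memory_order_acquire))
			return false;
		memcpy(mac, e->ether, 6);
		return true;
	}

	int main(void)
	{
		struct entry e = { .member = false };
		const unsigned char mac[6] = { 0x00, 0x1b, 0x44, 0x11, 0x3a, 0xb7 };
		unsigned char out[6];

		update_mac(&e, mac);
		if (lookup(&e, out))
			printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
			       out[0], out[1], out[2], out[3], out[4], out[5]);
		return 0;
	}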
static inline int static inline int
...@@ -239,18 +247,12 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -239,18 +247,12 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
u32 ip = 0; u32 ip = 0;
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret) if (ret)
return ret; return ret;
...@@ -350,8 +352,9 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], ...@@ -350,8 +352,9 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (cidr >= HOST_MASK) if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr); ip_set_mask_from_to(first_ip, last_ip, cidr);
} else } else {
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
}
elements = (u64)last_ip - first_ip + 1; elements = (u64)last_ip - first_ip + 1;
...@@ -419,6 +422,7 @@ bitmap_ipmac_init(void) ...@@ -419,6 +422,7 @@ bitmap_ipmac_init(void)
static void __exit static void __exit
bitmap_ipmac_fini(void) bitmap_ipmac_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&bitmap_ipmac_type); ip_set_type_unregister(&bitmap_ipmac_type);
} }
......
...@@ -73,7 +73,7 @@ static inline int ...@@ -73,7 +73,7 @@ static inline int
bitmap_port_do_add(const struct bitmap_port_adt_elem *e, bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map, u32 flags, size_t dsize) struct bitmap_port *map, u32 flags, size_t dsize)
{ {
return !!test_and_set_bit(e->id, map->members); return !!test_bit(e->id, map->members);
} }
static inline int static inline int
...@@ -136,19 +136,13 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -136,19 +136,13 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
u16 port_to; u16 port_to;
int ret = 0; int ret = 0;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;
port = ip_set_get_h16(tb[IPSET_ATTR_PORT]); port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
if (port < map->first_port || port > map->last_port) if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
...@@ -168,8 +162,9 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -168,8 +162,9 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
if (port < map->first_port) if (port < map->first_port)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
} }
} else } else {
port_to = port; port_to = port;
}
if (port_to > map->last_port) if (port_to > map->last_port)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
...@@ -180,7 +175,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -180,7 +175,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -312,6 +307,7 @@ bitmap_port_init(void) ...@@ -312,6 +307,7 @@ bitmap_port_init(void)
static void __exit static void __exit
bitmap_port_fini(void) bitmap_port_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&bitmap_port_type); ip_set_type_unregister(&bitmap_port_type);
} }
......
...@@ -30,7 +30,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, ...@@ -30,7 +30,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
const struct tcphdr *th; const struct tcphdr *th;
th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph); th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
if (th == NULL) if (!th)
/* No choice either */ /* No choice either */
return false; return false;
...@@ -42,7 +42,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, ...@@ -42,7 +42,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
const sctp_sctphdr_t *sh; const sctp_sctphdr_t *sh;
sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
if (sh == NULL) if (!sh)
/* No choice either */ /* No choice either */
return false; return false;
...@@ -55,7 +55,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, ...@@ -55,7 +55,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
const struct udphdr *uh; const struct udphdr *uh;
uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph); uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
if (uh == NULL) if (!uh)
/* No choice either */ /* No choice either */
return false; return false;
...@@ -67,7 +67,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, ...@@ -67,7 +67,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
const struct icmphdr *ic; const struct icmphdr *ic;
ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
if (ic == NULL) if (!ic)
return false; return false;
*port = (__force __be16)htons((ic->type << 8) | ic->code); *port = (__force __be16)htons((ic->type << 8) | ic->code);
...@@ -78,7 +78,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, ...@@ -78,7 +78,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
const struct icmp6hdr *ic; const struct icmp6hdr *ic;
ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
if (ic == NULL) if (!ic)
return false; return false;
*port = (__force __be16) *port = (__force __be16)
...@@ -116,7 +116,8 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src, ...@@ -116,7 +116,8 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
return false; return false;
default: default:
/* Other protocols don't have ports, /* Other protocols don't have ports,
so we can match fragments */ * so we can match fragments.
*/
*proto = protocol; *proto = protocol;
return true; return true;
} }
......
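
get_port() extracts each transport header with skb_header_pointer(), which returns NULL when the packet is too short to contain it, hence the repeated "no choice either" bail-outs above. A minimal flat-buffer model of that bounds-checked accessor (illustrative names; the real helper can also return a pointer into the skb instead of copying):

	#include <arpa/inet.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Model of skb_header_pointer(): validate that [off, off+len) lies
	 * inside the packet, copy the bytes into the caller's buffer, and
	 * return NULL on truncated packets ("no choice either"). */
	static const void *header_pointer(const unsigned char *pkt, size_t pktlen,
					  size_t off, size_t len, void *buf)
	{
		if (off + len > pktlen)
			return NULL;
		memcpy(buf, pkt + off, len);
		return buf;
	}

	struct tcp_ports { uint16_t source, dest; };

	int main(void)
	{
		/* Fake packet: 4 bytes of "header", then TCP ports 80 -> 1234. */
		unsigned char pkt[8] = { 0, 0, 0, 0, 0x00, 0x50, 0x04, 0xd2 };
		struct tcp_ports _ports;
		const struct tcp_ports *th;

		th = header_pointer(pkt, sizeof(pkt), 4, sizeof(_ports), &_ports);
		if (!th)
			return 1;	/* truncated: no choice but to give up */
		printf("src=%u dst=%u\n", ntohs(th->source), ntohs(th->dest));
		return 0;
	}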
...@@ -108,18 +108,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -108,18 +108,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
u32 ip = 0, ip_to = 0, hosts; u32 ip = 0, ip_to = 0, hosts;
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret) if (ret)
return ret; return ret;
...@@ -164,7 +158,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -164,7 +158,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -246,20 +240,20 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -246,20 +240,20 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext ext = IP_SET_INIT_UEXT(set); struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
return ret; return ret;
...@@ -321,6 +315,7 @@ hash_ip_init(void) ...@@ -321,6 +315,7 @@ hash_ip_init(void)
static void __exit static void __exit
hash_ip_fini(void) hash_ip_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&hash_ip_type); ip_set_type_unregister(&hash_ip_type);
} }
......
...@@ -108,19 +108,13 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -108,19 +108,13 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
u32 ip, ip_to = 0; u32 ip, ip_to = 0;
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
return ret; return ret;
...@@ -161,7 +155,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -161,7 +155,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -212,7 +206,6 @@ hash_ipmark6_data_next(struct hash_ipmark4_elem *next, ...@@ -212,7 +206,6 @@ hash_ipmark6_data_next(struct hash_ipmark4_elem *next,
#define IP_SET_EMIT_CREATE #define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h" #include "ip_set_hash_gen.h"
static int static int
hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
...@@ -240,20 +233,20 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -240,20 +233,20 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext ext = IP_SET_INIT_UEXT(set); struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK) || !ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (tb[IPSET_ATTR_LINENO]) if (cidr != HOST_MASK)
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
...@@ -274,10 +267,8 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -274,10 +267,8 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
ret = adtfn(set, &e, &ext, &ext, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0;
return ret; return 0;
} }
static struct ip_set_type hash_ipmark_type __read_mostly = { static struct ip_set_type hash_ipmark_type __read_mostly = {
...@@ -325,6 +316,7 @@ hash_ipmark_init(void) ...@@ -325,6 +316,7 @@ hash_ipmark_init(void)
static void __exit static void __exit
hash_ipmark_fini(void) hash_ipmark_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&hash_ipmark_type); ip_set_type_unregister(&hash_ipmark_type);
} }
......
...@@ -116,20 +116,14 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -116,20 +116,14 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
bool with_ports = false; bool with_ports = false;
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
return ret; return ret;
...@@ -146,8 +140,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -146,8 +140,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else {
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0; e.port = 0;
...@@ -193,7 +188,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -193,7 +188,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
} }
...@@ -279,21 +274,21 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -279,21 +274,21 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
bool with_ports = false; bool with_ports = false;
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (tb[IPSET_ATTR_LINENO]) if (cidr != HOST_MASK)
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
...@@ -311,8 +306,9 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -311,8 +306,9 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else {
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0; e.port = 0;
...@@ -335,7 +331,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -335,7 +331,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -388,6 +384,7 @@ hash_ipport_init(void) ...@@ -388,6 +384,7 @@ hash_ipport_init(void)
static void __exit static void __exit
hash_ipport_fini(void) hash_ipport_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&hash_ipport_type); ip_set_type_unregister(&hash_ipport_type);
} }
......
...@@ -119,20 +119,14 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -119,20 +119,14 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
bool with_ports = false; bool with_ports = false;
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
return ret; return ret;
...@@ -153,8 +147,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -153,8 +147,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else {
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0; e.port = 0;
...@@ -200,7 +195,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -200,7 +195,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
} }
...@@ -290,21 +285,21 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -290,21 +285,21 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
bool with_ports = false; bool with_ports = false;
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (tb[IPSET_ATTR_LINENO]) if (cidr != HOST_MASK)
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
...@@ -326,8 +321,9 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -326,8 +321,9 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else {
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0; e.port = 0;
...@@ -350,7 +346,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -350,7 +346,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -403,6 +399,7 @@ hash_ipportip_init(void) ...@@ -403,6 +399,7 @@ hash_ipportip_init(void)
static void __exit static void __exit
hash_ipportip_fini(void) hash_ipportip_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&hash_ipportip_type); ip_set_type_unregister(&hash_ipportip_type);
} }
......
...@@ -141,7 +141,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, ...@@ -141,7 +141,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_ipportnet *h = set->data; const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem e = { struct hash_ipportnet4_elem e = {
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1, .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
...@@ -173,21 +173,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -173,21 +173,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
u8 cidr; u8 cidr;
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret) if (ret)
return ret; return ret;
...@@ -215,14 +209,16 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -215,14 +209,16 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else {
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0; e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
...@@ -269,8 +265,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -269,8 +265,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip2_from, ip2_to); swap(ip2_from, ip2_to);
if (ip2_from + UINT_MAX == ip2_to) if (ip2_from + UINT_MAX == ip2_to)
return -IPSET_ERR_HASH_RANGE; return -IPSET_ERR_HASH_RANGE;
} else } else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1); ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
}
if (retried) if (retried)
ip = ntohl(h->next.ip); ip = ntohl(h->next.ip);
...@@ -293,7 +290,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -293,7 +290,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
ip2 = ip2_last + 1; ip2 = ip2_last + 1;
} }
...@@ -395,7 +392,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, ...@@ -395,7 +392,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_ipportnet *h = set->data; const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem e = { struct hash_ipportnet6_elem e = {
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1, .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
...@@ -426,24 +423,22 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -426,24 +423,22 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
u8 cidr; u8 cidr;
int ret; int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO])) if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (tb[IPSET_ATTR_LINENO]) if (cidr != HOST_MASK)
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret) if (ret)
...@@ -474,14 +469,16 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -474,14 +469,16 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else {
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0; e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
...@@ -505,7 +502,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -505,7 +502,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else
ret = 0; ret = 0;
} }
return ret; return ret;
...@@ -562,6 +559,7 @@ hash_ipportnet_init(void) ...@@ -562,6 +559,7 @@ hash_ipportnet_init(void)
static void __exit static void __exit
hash_ipportnet_fini(void) hash_ipportnet_fini(void)
{ {
rcu_barrier();
ip_set_type_unregister(&hash_ipportnet_type); ip_set_type_unregister(&hash_ipportnet_type);
} }
......
...@@ -92,7 +92,7 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb, ...@@ -92,7 +92,7 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
(skb_mac_header(skb) + ETH_HLEN) > skb->data) (skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL; return -EINVAL;
memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN); ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0) if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
return -EINVAL; return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
@@ -107,22 +107,16 @@ hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;

-	if (unlikely(!tb[IPSET_ATTR_ETHER] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

+	if (unlikely(!tb[IPSET_ATTR_ETHER]))
+		return -IPSET_ERR_PROTOCOL;
+
 	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
-	memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
+	ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));
 	if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
 		return -IPSET_ERR_HASH_ELEM;
@@ -171,6 +165,7 @@ hash_mac_init(void)
 static void __exit
 hash_mac_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_mac_type);
 }
...
@@ -120,7 +120,7 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
@@ -146,19 +146,13 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	u32 ip = 0, ip_to = 0, last;
 	int ret;

-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
@@ -175,6 +169,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -182,7 +177,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
 		e.ip = htonl(ip & ip_set_hostmask(e.cidr));
 		ret = adtfn(set, &e, &ext, &ext, flags);
-		return ip_set_enomatch(ret, flags, adt, set) ? -ret:
+		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
 			ip_set_eexist(ret, flags) ? 0 : ret;
 	}
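The ternary that gains a space here encodes the uadt return convention: ip_set_enomatch() reports that an IPSET_TEST hit an element flagged with nomatch, so the answer is negated, while ip_set_eexist() turns -IPSET_ERR_EXIST into success when the command carried IPSET_FLAG_EXIST. Roughly (see ip_set.h for the exact helper bodies):

    ret = adtfn(set, &e, &ext, &ext, flags);
    /* nomatch hit on a test: invert the result;
     * "already exists" plus IPSET_FLAG_EXIST: report success. */
    return ip_set_enomatch(ret, flags, adt, set) ? -ret :
           ip_set_eexist(ret, flags) ? 0 : ret;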
@@ -204,7 +199,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ret = adtfn(set, &e, &ext, &ext, flags);
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 		ip = last + 1;
 	}
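For context, the loop this hunk touches walks an arbitrary IPv4 range by repeatedly carving off the largest CIDR block that still fits, using ip_set_range_to_cidr() from pfxlen.c (whose tables are tidied at the end of this patchset). The shape of the loop, sketched:

    /* Sketch: split the host-order range [ip, ip_to] into CIDR
     * blocks and add each block as one hash:net element. */
    do {
            e.ip = htonl(ip);
            last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
            ret = adtfn(set, &e, &ext, &ext, flags);
            if (ret && !ip_set_eexist(ret, flags))
                    return ret;

            ret = 0;
            ip = last + 1;  /* wraps to 0 after 255.255.255.255 */
    } while (ip && ip <= ip_to);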
@@ -294,7 +289,7 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net6_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
@@ -318,21 +313,15 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;

+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
 		return -IPSET_ERR_PROTOCOL;
 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
 		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
-	if (tb[IPSET_ATTR_LINENO])
-		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
@@ -341,16 +330,17 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (ret)
 		return ret;

-	if (tb[IPSET_ATTR_CIDR])
+	if (tb[IPSET_ATTR_CIDR]) {
 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-	if (!e.cidr || e.cidr > HOST_MASK)
-		return -IPSET_ERR_INVALID_CIDR;
+		if (!e.cidr || e.cidr > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}

 	ip6_netmask(&e.ip, e.cidr);

 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -404,6 +394,7 @@ hash_net_init(void)
 static void __exit
 hash_net_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_net_type);
 }
...
 #include <linux/export.h>
 #include <linux/netfilter/ipset/pfxlen.h>

-/*
- * Prefixlen maps for fast conversions, by Jan Engelhardt.
- */
+/* Prefixlen maps for fast conversions, by Jan Engelhardt. */

 #define E(a, b, c, d) \
 	{.ip6 = { \
@@ -11,8 +9,7 @@
 		htonl(c), htonl(d), \
 	} }

-/*
- * This table works for both IPv4 and IPv6;
+/* This table works for both IPv4 and IPv6;
  * just use prefixlen_netmask_map[prefixlength].ip.
  */
 const union nf_inet_addr ip_set_netmask_map[] = {
@@ -150,12 +147,11 @@ EXPORT_SYMBOL_GPL(ip_set_netmask_map);
 #undef E
 #define E(a, b, c, d) \
-	{.ip6 = { (__force __be32) a, (__force __be32) b, \
-		  (__force __be32) c, (__force __be32) d, \
+	{.ip6 = { (__force __be32)a, (__force __be32)b, \
+		  (__force __be32)c, (__force __be32)d, \
 	} }

-/*
- * This table works for both IPv4 and IPv6;
+/* This table works for both IPv4 and IPv6;
  * just use prefixlen_hostmask_map[prefixlength].ip.
  */
 const union nf_inet_addr ip_set_hostmask_map[] = {
...
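As the reflowed comments say, both arrays are indexed directly by prefix length and serve IPv4 and IPv6 alike; pfxlen.h wraps them in the ip_set_netmask()/ip_set_hostmask() macros. A small usage sketch (the address values are illustrative only):

    #include <linux/netfilter/ipset/pfxlen.h>

    /* Network-byte-order lookup: mask 192.168.10.7 down to its /24. */
    __be32 addr = htonl(0xc0a80a07);            /* 192.168.10.7 */
    __be32 net  = addr & ip_set_netmask(24);    /* 192.168.10.0 */

    /* Host-byte-order variant, as hash_net4_uadt() uses above. */
    u32 haddr = 0xc0a80a07;
    u32 hnet  = haddr & ip_set_hostmask(24);    /* 0xc0a80a00 */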