Commit 8034e1ef authored by David S. Miller's avatar David S. Miller
parents bffae697 b16c2919
...@@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, ...@@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto); const struct nf_conntrack_l4proto *proto);
#ifdef CONFIG_LOCKDEP #define CONNTRACK_LOCKS 1024
# define CONNTRACK_LOCKS 8
#else
# define CONNTRACK_LOCKS 1024
#endif
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
extern spinlock_t nf_conntrack_expect_lock; extern spinlock_t nf_conntrack_expect_lock;
......
...@@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, ...@@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (e.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
e.cidr = HOST_MASK; e.cidr = HOST_MASK;
...@@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, ...@@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (e.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
e.cidr = HOST_MASK; e.cidr = HOST_MASK;
......
...@@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks); ...@@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
/* Locking scheme: normal paths take one per-bucket lock from
 * nf_conntrack_locks[]; a rare "lock everything" mode is signalled by
 * setting nf_conntrack_locks_all while holding nf_conntrack_locks_all_lock,
 * so per-bucket lockers can detect and wait out a global locker.
 */
static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
static __read_mostly bool nf_conntrack_locks_all;

/* Acquire one per-bucket conntrack lock, yielding to a global locker.
 *
 * After taking @lock, re-check nf_conntrack_locks_all: if a global
 * lock-all operation is in progress, drop @lock and block on
 * nf_conntrack_locks_all_lock until the global section ends, then retry.
 *
 * NOTE(review): nf_conntrack_locks_all is a plain bool read here with no
 * explicit memory barrier; the handoff relies on the implicit ordering of
 * the spin_lock()/spin_unlock() pairs against the all_lock/all_unlock
 * side -- confirm this pairing is sufficient on weakly-ordered CPUs.
 */
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
spin_lock(lock);
while (unlikely(nf_conntrack_locks_all)) {
spin_unlock(lock);
/* Wait for the global locker to finish, then retry the bucket lock. */
spin_lock(&nf_conntrack_locks_all_lock);
spin_unlock(&nf_conntrack_locks_all_lock);
spin_lock(lock);
}
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{ {
h1 %= CONNTRACK_LOCKS; h1 %= CONNTRACK_LOCKS;
...@@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, ...@@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
h1 %= CONNTRACK_LOCKS; h1 %= CONNTRACK_LOCKS;
h2 %= CONNTRACK_LOCKS; h2 %= CONNTRACK_LOCKS;
if (h1 <= h2) { if (h1 <= h2) {
spin_lock(&nf_conntrack_locks[h1]); nf_conntrack_lock(&nf_conntrack_locks[h1]);
if (h1 != h2) if (h1 != h2)
spin_lock_nested(&nf_conntrack_locks[h2], spin_lock_nested(&nf_conntrack_locks[h2],
SINGLE_DEPTH_NESTING); SINGLE_DEPTH_NESTING);
} else { } else {
spin_lock(&nf_conntrack_locks[h2]); nf_conntrack_lock(&nf_conntrack_locks[h2]);
spin_lock_nested(&nf_conntrack_locks[h1], spin_lock_nested(&nf_conntrack_locks[h1],
SINGLE_DEPTH_NESTING); SINGLE_DEPTH_NESTING);
} }
...@@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void) ...@@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void)
{ {
int i; int i;
for (i = 0; i < CONNTRACK_LOCKS; i++) spin_lock(&nf_conntrack_locks_all_lock);
spin_lock_nested(&nf_conntrack_locks[i], i); nf_conntrack_locks_all = true;
for (i = 0; i < CONNTRACK_LOCKS; i++) {
spin_lock(&nf_conntrack_locks[i]);
spin_unlock(&nf_conntrack_locks[i]);
}
} }
static void nf_conntrack_all_unlock(void) static void nf_conntrack_all_unlock(void)
{ {
int i; nf_conntrack_locks_all = false;
spin_unlock(&nf_conntrack_locks_all_lock);
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_unlock(&nf_conntrack_locks[i]);
} }
unsigned int nf_conntrack_htable_size __read_mostly; unsigned int nf_conntrack_htable_size __read_mostly;
...@@ -757,7 +775,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash) ...@@ -757,7 +775,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
hash = hash_bucket(_hash, net); hash = hash_bucket(_hash, net);
for (; i < net->ct.htable_size; i++) { for (; i < net->ct.htable_size; i++) {
lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
spin_lock(lockp); nf_conntrack_lock(lockp);
if (read_seqcount_retry(&net->ct.generation, sequence)) { if (read_seqcount_retry(&net->ct.generation, sequence)) {
spin_unlock(lockp); spin_unlock(lockp);
goto restart; goto restart;
...@@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), ...@@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
for (; *bucket < net->ct.htable_size; (*bucket)++) { for (; *bucket < net->ct.htable_size; (*bucket)++) {
lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
local_bh_disable(); local_bh_disable();
spin_lock(lockp); nf_conntrack_lock(lockp);
if (*bucket < net->ct.htable_size) { if (*bucket < net->ct.htable_size) {
hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
......
...@@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, ...@@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
} }
local_bh_disable(); local_bh_disable();
for (i = 0; i < net->ct.htable_size; i++) { for (i = 0; i < net->ct.htable_size; i++) {
spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
if (i < net->ct.htable_size) { if (i < net->ct.htable_size) {
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
unhelp(h, me); unhelp(h, me);
......
...@@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart: restart:
lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS]; lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
spin_lock(lockp); nf_conntrack_lock(lockp);
if (cb->args[0] >= net->ct.htable_size) { if (cb->args[0] >= net->ct.htable_size) {
spin_unlock(lockp); spin_unlock(lockp);
goto out; goto out;
......
...@@ -224,12 +224,12 @@ static int __init nf_tables_netdev_init(void) ...@@ -224,12 +224,12 @@ static int __init nf_tables_netdev_init(void)
nft_register_chain_type(&nft_filter_chain_netdev); nft_register_chain_type(&nft_filter_chain_netdev);
ret = register_pernet_subsys(&nf_tables_netdev_net_ops); ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
if (ret < 0) if (ret < 0) {
nft_unregister_chain_type(&nft_filter_chain_netdev); nft_unregister_chain_type(&nft_filter_chain_netdev);
return ret;
}
register_netdevice_notifier(&nf_tables_netdev_notifier); register_netdevice_notifier(&nf_tables_netdev_notifier);
return 0;
return ret;
} }
static void __exit nf_tables_netdev_exit(void) static void __exit nf_tables_netdev_exit(void)
......
...@@ -307,12 +307,12 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout) ...@@ -307,12 +307,12 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
local_bh_disable(); local_bh_disable();
for (i = 0; i < net->ct.htable_size; i++) { for (i = 0; i < net->ct.htable_size; i++) {
spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
if (i < net->ct.htable_size) { if (i < net->ct.htable_size) {
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
untimeout(h, timeout); untimeout(h, timeout);
} }
spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
} }
local_bh_enable(); local_bh_enable();
} }
......
...@@ -46,16 +46,14 @@ static void nft_byteorder_eval(const struct nft_expr *expr, ...@@ -46,16 +46,14 @@ static void nft_byteorder_eval(const struct nft_expr *expr,
switch (priv->op) { switch (priv->op) {
case NFT_BYTEORDER_NTOH: case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 8; i++) { for (i = 0; i < priv->len / 8; i++) {
src64 = get_unaligned_be64(&src[i]); src64 = get_unaligned((u64 *)&src[i]);
src64 = be64_to_cpu((__force __be64)src64);
put_unaligned_be64(src64, &dst[i]); put_unaligned_be64(src64, &dst[i]);
} }
break; break;
case NFT_BYTEORDER_HTON: case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 8; i++) { for (i = 0; i < priv->len / 8; i++) {
src64 = get_unaligned_be64(&src[i]); src64 = get_unaligned_be64(&src[i]);
src64 = (__force u64)cpu_to_be64(src64); put_unaligned(src64, (u64 *)&dst[i]);
put_unaligned_be64(src64, &dst[i]);
} }
break; break;
} }
......
...@@ -127,6 +127,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, ...@@ -127,6 +127,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
NF_CT_LABELS_MAX_SIZE - size); NF_CT_LABELS_MAX_SIZE - size);
return; return;
} }
#endif
case NFT_CT_BYTES: /* fallthrough */ case NFT_CT_BYTES: /* fallthrough */
case NFT_CT_PKTS: { case NFT_CT_PKTS: {
const struct nf_conn_acct *acct = nf_conn_acct_find(ct); const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
...@@ -138,7 +139,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr, ...@@ -138,7 +139,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
memcpy(dest, &count, sizeof(count)); memcpy(dest, &count, sizeof(count));
return; return;
} }
#endif
default: default:
break; break;
} }
......
...@@ -228,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) ...@@ -228,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{ {
struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb);
u8 nexthdr; u8 nexthdr;
__be16 frag_off; __be16 frag_off, oldlen, newlen;
int tcphoff; int tcphoff;
int ret; int ret;
...@@ -244,7 +244,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) ...@@ -244,7 +244,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
return NF_DROP; return NF_DROP;
if (ret > 0) { if (ret > 0) {
ipv6h = ipv6_hdr(skb); ipv6h = ipv6_hdr(skb);
ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret); oldlen = ipv6h->payload_len;
newlen = htons(ntohs(oldlen) + ret);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(csum_sub(skb->csum, oldlen),
newlen);
ipv6h->payload_len = newlen;
} }
return XT_CONTINUE; return XT_CONTINUE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment