Commit 77621f02 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree,
they are:

1) Fix SIP conntrack with phones sending session descriptions for different
   media types but same port numbers, from Florian Westphal.

2) Fix incorrect rtnl_lock mutex logic from IPVS sync thread, from Julian
   Anastasov.

3) Skip the compat array allocation in ebtables if there are no entries, also
   from Florian.

4) Do not lose left/right bits when shifting marks from xt_connmark, from
   Jack Ma.

5) Silence false positive memleak in conntrack extensions, from Cong Wang.

6) Fix CONFIG_NF_REJECT_IPV6=m link problems, from Arnd Bergmann.

7) We cannot kfree a rule that is already linked into the rule list in nf_tables;
   switch the order of operations so this error handling is no longer required,
   from Florian Westphal.

8) Release set name in error path, from Florian.

9) Include kmemleak.h in nf_conntrack_extend.c, from Stephen Rothwell.

10) NAT chain and extensions depend on NF_TABLES.

11) Out-of-bounds access when renaming chains, from Taehee Yoo.

12) Incorrect casting in xt_connmark leads to wrong bitshifting.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents aa8f8778 5a786232
@@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info,
 {
         unsigned int size = info->entries_size;
         const void *entries = info->entries;
-        int ret;
 
         newinfo->entries_size = size;
-
-        ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
-        if (ret)
-                return ret;
+        if (info->nentries) {
+                int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
+                                                 info->nentries);
+                if (ret)
+                        return ret;
+        }
 
         return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                                  entries, newinfo);

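A note on the ebtables hunk above: xt_compat_init_offsets() sizes its scratch area from the entry count, so calling it with a count of zero used to attempt a zero-sized allocation; the guard simply skips the setup when there is nothing to translate. Below is a minimal userspace sketch of the same guard. It is not kernel code; init_offsets() is a hypothetical stand-in for the kernel helper and the names are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for xt_compat_init_offsets(): allocates one
 * offset slot per entry and fails on a zero-sized request. */
static int init_offsets(unsigned int nentries, int **offsets)
{
        if (nentries == 0)
                return -EINVAL;
        *offsets = calloc(nentries, sizeof(**offsets));
        return *offsets ? 0 : -ENOMEM;
}

static int translate_table(unsigned int nentries)
{
        int *offsets = NULL;

        /* The fix in spirit: only set up compat offsets when entries exist. */
        if (nentries) {
                int ret = init_offsets(nentries, &offsets);
                if (ret)
                        return ret;
        }

        printf("translated %u entries\n", nentries);
        free(offsets);
        return 0;
}

int main(void)
{
        return translate_table(0) || translate_table(3);
}
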
@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
           fields such as the source, destination, flowlabel, hop-limit and
           the packet mark.
 
+if NF_NAT_IPV6
+
+config NFT_CHAIN_NAT_IPV6
+        tristate "IPv6 nf_tables nat chain support"
+        help
+          This option enables the "nat" chain for IPv6 in nf_tables. This
+          chain type is used to perform Network Address Translation (NAT)
+          packet transformations such as the source, destination address and
+          source and destination ports.
+
+config NFT_MASQ_IPV6
+        tristate "IPv6 masquerade support for nf_tables"
+        depends on NFT_MASQ
+        select NF_NAT_MASQUERADE_IPV6
+        help
+          This is the expression that provides IPv4 masquerading support for
+          nf_tables.
+
+config NFT_REDIR_IPV6
+        tristate "IPv6 redirect support for nf_tables"
+        depends on NFT_REDIR
+        select NF_NAT_REDIRECT
+        help
+          This is the expression that provides IPv4 redirect support for
+          nf_tables.
+
+endif # NF_NAT_IPV6
+
 config NFT_REJECT_IPV6
         select NF_REJECT_IPV6
         default NFT_REJECT
@@ -107,39 +135,12 @@ config NF_NAT_IPV6
 
 if NF_NAT_IPV6
 
-config NFT_CHAIN_NAT_IPV6
-        depends on NF_TABLES_IPV6
-        tristate "IPv6 nf_tables nat chain support"
-        help
-          This option enables the "nat" chain for IPv6 in nf_tables. This
-          chain type is used to perform Network Address Translation (NAT)
-          packet transformations such as the source, destination address and
-          source and destination ports.
-
 config NF_NAT_MASQUERADE_IPV6
         tristate "IPv6 masquerade support"
         help
           This is the kernel functionality to provide NAT in the masquerade
           flavour (automatic source address selection) for IPv6.
 
-config NFT_MASQ_IPV6
-        tristate "IPv6 masquerade support for nf_tables"
-        depends on NF_TABLES_IPV6
-        depends on NFT_MASQ
-        select NF_NAT_MASQUERADE_IPV6
-        help
-          This is the expression that provides IPv4 masquerading support for
-          nf_tables.
-
-config NFT_REDIR_IPV6
-        tristate "IPv6 redirect support for nf_tables"
-        depends on NF_TABLES_IPV6
-        depends on NFT_REDIR
-        select NF_NAT_REDIRECT
-        help
-          This is the expression that provides IPv4 redirect support for
-          nf_tables.
-
 endif # NF_NAT_IPV6
 
 config IP6_NF_IPTABLES

@@ -594,6 +594,7 @@ config NFT_QUOTA
 config NFT_REJECT
         default m if NETFILTER_ADVANCED=n
         tristate "Netfilter nf_tables reject support"
+        depends on !NF_TABLES_INET || (IPV6!=m || m)
         help
           This option adds the "reject" expression that you can use to
           explicitly deny and notify via TCP reset/ICMP informational errors

@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                         strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
                                 sizeof(cfg.mcast_ifn));
                         cfg.syncid = dm->syncid;
-                        rtnl_lock();
-                        mutex_lock(&ipvs->sync_mutex);
                         ret = start_sync_thread(ipvs, &cfg, dm->state);
-                        mutex_unlock(&ipvs->sync_mutex);
-                        rtnl_unlock();
                 } else {
                         mutex_lock(&ipvs->sync_mutex);
                         ret = stop_sync_thread(ipvs, dm->state);
@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
         if (ipvs->mixed_address_family_dests > 0)
                 return -EINVAL;
 
-        rtnl_lock();
-        mutex_lock(&ipvs->sync_mutex);
         ret = start_sync_thread(ipvs, &c,
                                 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-        mutex_unlock(&ipvs->sync_mutex);
-        rtnl_unlock();
 
         return ret;
 }

(One large file diff in this merge is collapsed and not shown in this view.)
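On the IPVS hunks above: the callers no longer wrap start_sync_thread() in rtnl_lock() plus the sync mutex; per the changelog, the sync-thread code (in the collapsed diff) now takes the locking it needs itself, so every path acquires locks in one well-defined order. Below is a tiny pthread sketch, with purely illustrative lock and function names, of that general rule: let the callee own its locking rather than having each caller improvise the ordering. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

/* Illustrative locks only; they merely stand in for two kernel-side locks. */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;

/* The helper owns the locking, so every caller gets the same order:
 * cfg_lock first, then sync_lock. */
static int start_sync(const char *ifname)
{
        pthread_mutex_lock(&cfg_lock);
        pthread_mutex_lock(&sync_lock);
        printf("sync thread started on %s\n", ifname);
        pthread_mutex_unlock(&sync_lock);
        pthread_mutex_unlock(&cfg_lock);
        return 0;
}

int main(void)
{
        /* Callers no longer take the locks themselves before calling in,
         * which is what made inconsistent ordering possible. */
        return start_sync("eth0");
}
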
@@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 static inline int expect_matches(const struct nf_conntrack_expect *a,
                                  const struct nf_conntrack_expect *b)
 {
-        return a->master == b->master && a->class == b->class &&
+        return a->master == b->master &&
                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
                net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
@@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
         h = nf_ct_expect_dst_hash(net, &expect->tuple);
         hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                 if (expect_matches(i, expect)) {
+                        if (i->class != expect->class)
+                                return -EALREADY;
+
                         if (nf_ct_remove_expect(i))
                                 break;
                 } else if (expect_clash(i, expect)) {

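The expectation hunk above drops the class from expect_matches() and instead reports -EALREADY when an otherwise identical expectation is already registered under a different class, so the existing entry is kept rather than replaced. A small standalone sketch of that lookup rule follows; the structs, table and helper are simplified userspace stand-ins, not the conntrack data structures.

#include <errno.h>
#include <stdio.h>

struct expectation {
        unsigned int tuple;     /* stands in for the full conntrack tuple */
        unsigned int class;     /* e.g. audio vs video media class */
        int active;
};

static struct expectation table[8];

/* Return 0 if inserted, -EALREADY if the same tuple is already expected
 * under a different class (keep the existing entry). */
static int expect_insert(const struct expectation *e)
{
        for (unsigned int i = 0; i < 8; i++) {
                if (!table[i].active)
                        continue;
                if (table[i].tuple == e->tuple) {
                        if (table[i].class != e->class)
                                return -EALREADY;
                        table[i] = *e;  /* same class: refresh in place */
                        return 0;
                }
        }
        for (unsigned int i = 0; i < 8; i++) {
                if (!table[i].active) {
                        table[i] = *e;
                        table[i].active = 1;
                        return 0;
                }
        }
        return -ENOSPC;
}

int main(void)
{
        struct expectation audio = { .tuple = 10000, .class = 1, .active = 1 };
        struct expectation video = { .tuple = 10000, .class = 2, .active = 1 };

        printf("audio: %d\n", expect_insert(&audio)); /* 0 */
        printf("video: %d\n", expect_insert(&video)); /* -EALREADY */
        return 0;
}
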
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
@@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
         rcu_read_unlock();
 
         alloc = max(newlen, NF_CT_EXT_PREALLOC);
+        kmemleak_not_leak(old);
         new = __krealloc(old, alloc, gfp);
         if (!new)
                 return NULL;

@@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
                                        datalen, rtp_exp, rtcp_exp,
                                        mediaoff, medialen, daddr);
         else {
-                if (nf_ct_expect_related(rtp_exp) == 0) {
-                        if (nf_ct_expect_related(rtcp_exp) != 0)
-                                nf_ct_unexpect_related(rtp_exp);
-                        else
+                /* -EALREADY handling works around end-points that send
+                 * SDP messages with identical port but different media type,
+                 * we pretend expectation was set up.
+                 */
+                int errp = nf_ct_expect_related(rtp_exp);
+
+                if (errp == 0 || errp == -EALREADY) {
+                        int errcp = nf_ct_expect_related(rtcp_exp);
+
+                        if (errcp == 0 || errcp == -EALREADY)
                                 ret = NF_ACCEPT;
+                        else if (errp == 0)
+                                nf_ct_unexpect_related(rtp_exp);
                 }
         }
         nf_ct_expect_put(rtcp_exp);

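In the SIP hunk above, the caller now treats -EALREADY from nf_ct_expect_related() as success (the expectation already exists because the phone described several media streams on the same port), and it only unregisters the RTP expectation if this call actually created it (errp == 0). A hedged userspace sketch of that error-handling shape follows; expect_register() and expect_unregister() are made-up toy helpers, not netfilter APIs.

#include <errno.h>
#include <stdio.h>

/* Toy registry: pretend "already registered" for port 10000. */
static int expect_register(int port)
{
        if (port == 10000)
                return -EALREADY;
        return 0;
}

static void expect_unregister(int port)
{
        printf("unregistered expectation for port %d\n", port);
}

/* Mirrors the control flow of the fix: EALREADY counts as success, and we
 * only roll back what this call actually created. */
static int setup_rtp_rtcp(int rtp_port, int rtcp_port)
{
        int errp = expect_register(rtp_port);

        if (errp == 0 || errp == -EALREADY) {
                int errcp = expect_register(rtcp_port);

                if (errcp == 0 || errcp == -EALREADY)
                        return 0;       /* accept the packet */
                if (errp == 0)          /* don't drop someone else's entry */
                        expect_unregister(rtp_port);
        }
        return -1;
}

int main(void)
{
        printf("result: %d\n", setup_rtp_rtcp(10000, 10001));
        return 0;
}
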
@@ -2361,7 +2361,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
         }
 
         if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-                if (nft_is_active_next(net, old_rule)) {
+                if (!nft_is_active_next(net, old_rule)) {
+                        err = -ENOENT;
+                        goto err2;
+                }
                 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
                                            old_rule);
                 if (trans == NULL) {
@@ -2370,32 +2373,34 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
                 }
                 nft_deactivate_next(net, old_rule);
                 chain->use--;
-                        list_add_tail_rcu(&rule->list, &old_rule->list);
-                } else {
-                        err = -ENOENT;
-                        goto err2;
-                }
-        } else if (nlh->nlmsg_flags & NLM_F_APPEND)
-                if (old_rule)
-                        list_add_rcu(&rule->list, &old_rule->list);
-                else
-                        list_add_tail_rcu(&rule->list, &chain->rules);
-        else {
-                if (old_rule)
-                        list_add_tail_rcu(&rule->list, &old_rule->list);
-                else
-                        list_add_rcu(&rule->list, &chain->rules);
-        }
-
-        if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-                err = -ENOMEM;
-                goto err3;
-        }
+
+                if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+                        err = -ENOMEM;
+                        goto err2;
+                }
+
+                list_add_tail_rcu(&rule->list, &old_rule->list);
+        } else {
+                if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+                        err = -ENOMEM;
+                        goto err2;
+                }
+
+                if (nlh->nlmsg_flags & NLM_F_APPEND) {
+                        if (old_rule)
+                                list_add_rcu(&rule->list, &old_rule->list);
+                        else
+                                list_add_tail_rcu(&rule->list, &chain->rules);
+                } else {
+                        if (old_rule)
+                                list_add_tail_rcu(&rule->list, &old_rule->list);
+                        else
+                                list_add_rcu(&rule->list, &chain->rules);
+                }
+        }
         chain->use++;
         return 0;
 
-err3:
-        list_del_rcu(&rule->list);
 err2:
         nf_tables_rule_destroy(&ctx, rule);
 err1:

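The nf_tables_newrule() hunks above reorder the work so that the transaction object is allocated before the rule is linked into the RCU-visible rule list; once a rule has been published it cannot simply be kfree'd on the error path, which is why the old err3/list_del_rcu unwinding had to go. Here is a minimal sketch of the "do every fallible step before publication" pattern; the list and the "transaction" record are illustrative only, not nf_tables structures.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct rule {
        int id;
        struct rule *next;
};

static struct rule *chain;              /* stands in for chain->rules */

/* Everything that can fail (here: allocating the "transaction" record)
 * happens before the rule becomes reachable from the chain, so the error
 * path only frees private memory and never unlinks published data. */
static int add_rule(int id)
{
        struct rule *r = malloc(sizeof(*r));
        int *trans;

        if (!r)
                return -ENOMEM;

        trans = malloc(sizeof(*trans)); /* fallible step done up front */
        if (!trans) {
                free(r);                /* r was never published: safe */
                return -ENOMEM;
        }
        *trans = id;

        r->id = id;                     /* publish only after all checks */
        r->next = chain;
        chain = r;

        free(trans);                    /* kept around in the real code */
        return 0;
}

int main(void)
{
        printf("add: %d\n", add_rule(1));
        printf("head id: %d\n", chain ? chain->id : -1);
        return 0;
}
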
@@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
         err = ops->init(set, &desc, nla);
         if (err < 0)
-                goto err2;
+                goto err3;
 
         err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
         if (err < 0)
-                goto err3;
+                goto err4;
 
         list_add_tail_rcu(&set->list, &table->sets);
         table->use++;
         return 0;
 
-err3:
+err4:
         ops->destroy(set);
+err3:
+        kfree(set->name);
 err2:
         kvfree(set);
 err1:

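The nf_tables_newset() hunk above adds a label so the duplicated set name is released on failure, and the labels unwind in the reverse order of acquisition: destroy the set's ops state, then free the name, then the set itself. A small sketch of that goto-unwind idiom in plain C follows; the resources here (strdup'd name, calloc'd data) are illustrative substitutes for the kernel objects.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct set {
        char *name;
        int *data;
};

/* Each resource gets its own label; on failure we jump to the label that
 * undoes everything acquired so far, in reverse order. */
static int new_set(const char *name, struct set **out)
{
        struct set *s;
        int err;

        s = malloc(sizeof(*s));
        if (!s)
                return -ENOMEM;

        s->name = strdup(name);                 /* analogous to the name copy */
        if (!s->name) {
                err = -ENOMEM;
                goto err1;
        }

        s->data = calloc(16, sizeof(*s->data)); /* analogous to ops->init() */
        if (!s->data) {
                err = -ENOMEM;
                goto err2;
        }

        *out = s;
        return 0;

err2:
        free(s->name);  /* the release the upstream fix adds on the error path */
err1:
        free(s);
        return err;
}

int main(void)
{
        struct set *s = NULL;
        int err = new_set("myset", &s);

        printf("new_set: %d\n", err);
        if (!err) {
                free(s->data);
                free(s->name);
                free(s);
        }
        return 0;
}
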
@@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
         struct nft_base_chain *basechain;
 
         if (nft_trans_chain_name(trans))
-                strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
+                swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
 
         if (!nft_is_base_chain(trans->ctx.chain))
                 return;

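The one-liner above is the fix for the out-of-bounds chain rename: the old strcpy() copied a possibly longer new name into the buffer sized for the old name, while swap() just exchanges the two heap pointers, so the commit path never writes past an allocation. A minimal userspace illustration of the difference (swap_ptr is a local helper written for this sketch, not a kernel macro):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define swap_ptr(a, b) do { char *tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
        /* Buffer sized for the short original name. */
        char *current = strdup("in");
        char *pending = strdup("a-much-longer-chain-name");

        /* strcpy(current, pending) here would write far past the 3-byte
         * allocation behind "in" -- that is the out-of-bounds bug.
         * Swapping the pointers transfers ownership without copying: */
        swap_ptr(current, pending);

        printf("active name: %s\n", current);
        printf("old name   : %s\n", pending);  /* released afterwards */

        free(current);
        free(pending);
        return 0;
}
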
...@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark"); ...@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark"); MODULE_ALIAS("ip6t_connmark");
static unsigned int static unsigned int
connmark_tg_shift(struct sk_buff *skb, connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
const struct xt_connmark_tginfo1 *info,
u8 shift_bits, u8 shift_dir)
{ {
enum ip_conntrack_info ctinfo; enum ip_conntrack_info ctinfo;
u_int32_t new_targetmark;
struct nf_conn *ct; struct nf_conn *ct;
u_int32_t newmark; u_int32_t newmark;
...@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb, ...@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb,
switch (info->mode) { switch (info->mode) {
case XT_CONNMARK_SET: case XT_CONNMARK_SET:
newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
if (shift_dir == D_SHIFT_RIGHT) if (info->shift_dir == D_SHIFT_RIGHT)
newmark >>= shift_bits; newmark >>= info->shift_bits;
else else
newmark <<= shift_bits; newmark <<= info->shift_bits;
if (ct->mark != newmark) { if (ct->mark != newmark) {
ct->mark = newmark; ct->mark = newmark;
nf_conntrack_event_cache(IPCT_MARK, ct); nf_conntrack_event_cache(IPCT_MARK, ct);
} }
break; break;
case XT_CONNMARK_SAVE: case XT_CONNMARK_SAVE:
newmark = (ct->mark & ~info->ctmask) ^ new_targetmark = (skb->mark & info->nfmask);
(skb->mark & info->nfmask); if (info->shift_dir == D_SHIFT_RIGHT)
if (shift_dir == D_SHIFT_RIGHT) new_targetmark >>= info->shift_bits;
newmark >>= shift_bits;
else else
newmark <<= shift_bits; new_targetmark <<= info->shift_bits;
newmark = (ct->mark & ~info->ctmask) ^
new_targetmark;
if (ct->mark != newmark) { if (ct->mark != newmark) {
ct->mark = newmark; ct->mark = newmark;
nf_conntrack_event_cache(IPCT_MARK, ct); nf_conntrack_event_cache(IPCT_MARK, ct);
} }
break; break;
case XT_CONNMARK_RESTORE: case XT_CONNMARK_RESTORE:
newmark = (skb->mark & ~info->nfmask) ^ new_targetmark = (ct->mark & info->ctmask);
(ct->mark & info->ctmask); if (info->shift_dir == D_SHIFT_RIGHT)
if (shift_dir == D_SHIFT_RIGHT) new_targetmark >>= info->shift_bits;
newmark >>= shift_bits;
else else
newmark <<= shift_bits; new_targetmark <<= info->shift_bits;
newmark = (skb->mark & ~info->nfmask) ^
new_targetmark;
skb->mark = newmark; skb->mark = newmark;
break; break;
} }
...@@ -89,8 +93,14 @@ static unsigned int ...@@ -89,8 +93,14 @@ static unsigned int
connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{ {
const struct xt_connmark_tginfo1 *info = par->targinfo; const struct xt_connmark_tginfo1 *info = par->targinfo;
const struct xt_connmark_tginfo2 info2 = {
return connmark_tg_shift(skb, info, 0, 0); .ctmark = info->ctmark,
.ctmask = info->ctmask,
.nfmask = info->nfmask,
.mode = info->mode,
};
return connmark_tg_shift(skb, &info2);
} }
static unsigned int static unsigned int
...@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) ...@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{ {
const struct xt_connmark_tginfo2 *info = par->targinfo; const struct xt_connmark_tginfo2 *info = par->targinfo;
return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info, return connmark_tg_shift(skb, info);
info->shift_bits, info->shift_dir);
} }
static int connmark_tg_check(const struct xt_tgchk_param *par) static int connmark_tg_check(const struct xt_tgchk_param *par)
......
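The xt_connmark rework above does two things: connmark_tg_shift() now takes a real xt_connmark_tginfo2 (the v1 handler builds a temporary one instead of casting, so the shift fields that v1 does not have are never read), and for SAVE/RESTORE only the masked source value is shifted before it is XORed into the destination mark, so bits in the preserved part of the mark are no longer shifted away. Below is a small standalone calculation showing the SAVE case, old arithmetic versus new, with arbitrary example values chosen for this sketch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ctmark  = 0xff000001;  /* existing conntrack mark */
        uint32_t skbmark = 0x000000f0;  /* packet mark to save */
        uint32_t ctmask  = 0x000000ff;  /* which ct bits we may touch */
        uint32_t nfmask  = 0x000000ff;  /* which skb bits we read */
        unsigned int shift = 4;         /* shift right by 4 */

        /* Old XT_CONNMARK_SAVE: combine first, shift afterwards -- the
         * shift also moves the preserved 0xff000000 bits and loses some. */
        uint32_t old_mark = ((ctmark & ~ctmask) ^ (skbmark & nfmask)) >> shift;

        /* New XT_CONNMARK_SAVE: shift only the value being saved, then
         * XOR it into the untouched part of the conntrack mark. */
        uint32_t new_targetmark = (skbmark & nfmask) >> shift;
        uint32_t new_mark = (ctmark & ~ctmask) ^ new_targetmark;

        printf("old: 0x%08x\n", old_mark); /* 0x0ff0000f: preserved bits moved */
        printf("new: 0x%08x\n", new_mark); /* 0xff00000f: as intended */
        return 0;
}
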