Commit 2e99c07f authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree,
they are:

* Fix nf_trace in nftables if XT_TRACE=n, from Florian Westphal.

* Don't use the fast payload operation in nf_tables if the length is
  not a power of 2 or it is not aligned, from Nikolay Aleksandrov.

* Fix a missing break statement in the inet flavour of nft_reject,
  which resulted in IPv4 packets being evaluated by the IPv6
  evaluation routine, from Patrick McHardy.

* Fix wrong kconfig symbol in nft_meta to match the routing realm,
  from Paul Bolle.

* Allocate the NAT null binding when creating new conntracks via
  ctnetlink, to avoid several packets racing to initialize the
  conntrack NAT extension, original patch from Florian Westphal,
  revisited version from me.

* Fix DNAT handling in the snmp NAT helper: the same handling was being
  done for both SNAT and DNAT, while 2.4 already contains the correct
  fix, from Francois-Xavier Le Bail.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 960dfc4e 0eba801b
@@ -2725,7 +2725,7 @@ static inline void nf_reset(struct sk_buff *skb)
 static inline void nf_reset_trace(struct sk_buff *skb)
 {
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
 	skb->nf_trace = 0;
 #endif
 }
@@ -2742,6 +2742,9 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 	dst->nf_bridge = src->nf_bridge;
 	nf_bridge_get(src->nf_bridge);
 #endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+	dst->nf_trace = src->nf_trace;
+#endif
 }

 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
......
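The guard in nf_reset_trace()/__nf_copy() above now fires when xt_TRACE is =y or =m (via IS_ENABLED) or when nf_tables is built in (via defined()). A minimal user-space sketch of the kernel's IS_ENABLED() machinery, modelled on include/linux/kconfig.h with the config symbols simulated through #define, shows why =m also satisfies it:

/* User-space model of the kernel's IS_ENABLED() (include/linux/kconfig.h).
 * Config symbols are simulated with #define below; this is a sketch to
 * show that =y and =m both satisfy IS_ENABLED(), while defined() only
 * sees =y. Not kernel code.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

/* simulate CONFIG_NETFILTER_XT_TARGET_TRACE=m and CONFIG_NF_TABLES=y */
#define CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE 1
#define CONFIG_NF_TABLES 1

int main(void)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	puts("skb->nf_trace is compiled in");
#else
	puts("skb->nf_trace is compiled out");
#endif
	return 0;
}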
@@ -707,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mark = old->mark;
 	new->skb_iif = old->skb_iif;
 	__nf_copy(new, old);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-	new->nf_trace = old->nf_trace;
-#endif
 #ifdef CONFIG_NET_SCHED
 	new->tc_index = old->tc_index;
 #ifdef CONFIG_NET_CLS_ACT
......
@@ -422,9 +422,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->tc_index = from->tc_index;
 #endif
 	nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-	to->nf_trace = from->nf_trace;
-#endif
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	to->ipvs_property = from->ipvs_property;
 #endif
......
@@ -1198,8 +1198,8 @@ static int snmp_translate(struct nf_conn *ct,
 		map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
 	} else {
 		/* DNAT replies */
-		map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
-		map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
+		map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip);
+		map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip);
 	}

 	if (map.from == map.to)
......
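For reference, tuplehash[] is indexed by direction (IP_CT_DIR_ORIGINAL = 0, IP_CT_DIR_REPLY = 1), so !dir picks the opposite direction's tuple. A toy sketch with made-up addresses, illustrating only which tuple fields the old and new DNAT branches read, not full NAT semantics:

#include <stdio.h>
#include <stdint.h>

enum { IP_CT_DIR_ORIGINAL = 0, IP_CT_DIR_REPLY = 1 };

struct tuple { uint32_t src, dst; };

int main(void)
{
	/* one conntrack entry: original- and reply-direction tuples;
	 * the addresses are arbitrary placeholders */
	struct tuple tuplehash[2] = {
		[IP_CT_DIR_ORIGINAL] = { .src = 0x0a000001, .dst = 0xc6336401 },
		[IP_CT_DIR_REPLY]    = { .src = 0xc0a80101, .dst = 0x0a000001 },
	};
	int dir = IP_CT_DIR_REPLY;

	/* old (buggy) DNAT branch: same indexing as the SNAT branch */
	printf("old: from %#x to %#x\n",
	       (unsigned)tuplehash[dir].src, (unsigned)tuplehash[!dir].dst);
	/* fixed DNAT branch, matching the 2.4 helper */
	printf("new: from %#x to %#x\n",
	       (unsigned)tuplehash[!dir].src, (unsigned)tuplehash[dir].dst);
	return 0;
}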
@@ -530,9 +530,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->tc_index = from->tc_index;
 #endif
 	nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-	to->nf_trace = from->nf_trace;
-#endif
 	skb_copy_secmark(to, from);
 }
......
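The removals in __copy_skb_header(), ip_copy_metadata() and ip6_copy_metadata() are safe because __nf_copy() in the first file above now carries the nf_trace copy itself. A simplified model of that consolidation, with toy structs rather than the real sk_buff:

#include <stdio.h>

struct skb { int nf_trace; int mark; };

/* the shared helper now copies nf_trace itself */
static void nf_copy_model(struct skb *dst, const struct skb *src)
{
	dst->mark = src->mark;
	dst->nf_trace = src->nf_trace;	/* previously each caller did this */
}

/* callers such as the metadata-copy routines no longer need their own
 * ifdef'd nf_trace copy */
static void copy_metadata(struct skb *to, const struct skb *from)
{
	nf_copy_model(to, from);
}

int main(void)
{
	struct skb from = { .nf_trace = 1, .mark = 7 }, to = { 0 };

	copy_metadata(&to, &from);
	printf("nf_trace=%d mark=%d\n", to.nf_trace, to.mark);
	return 0;
}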
@@ -1310,27 +1310,22 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
 }

 static int
-ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
+ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 {
 #ifdef CONFIG_NF_NAT_NEEDED
 	int ret;

 	if (cda[CTA_NAT_DST]) {
-		ret = ctnetlink_parse_nat_setup(ct,
-						NF_NAT_MANIP_DST,
+		ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
 						cda[CTA_NAT_DST]);
 		if (ret < 0)
 			return ret;
 	}
 	if (cda[CTA_NAT_SRC]) {
-		ret = ctnetlink_parse_nat_setup(ct,
-						NF_NAT_MANIP_SRC,
+		ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
 						cda[CTA_NAT_SRC]);
 		if (ret < 0)
 			return ret;
 	}
 	return 0;
 #else
+	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+		return 0;
 	return -EOPNOTSUPP;
 #endif
 }
@@ -1659,11 +1654,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 			goto err2;
 	}

-	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
-		err = ctnetlink_change_nat(ct, cda);
-		if (err < 0)
-			goto err2;
-	}
+	err = ctnetlink_setup_nat(ct, cda);
+	if (err < 0)
+		goto err2;

 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
......
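Calling ctnetlink_setup_nat() unconditionally means conntracks created via ctnetlink get their NAT binding attached at creation time, instead of lazily by the first packets, where several packets could race to initialize the NAT extension. A toy user-space model of that check-then-act race (pthreads, illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>

struct conn {
	int nat_initialized;
	int setup_count;	/* times the "NAT extension" was set up */
};

static struct conn shared;

static void *packet_path(void *arg)
{
	(void)arg;
	/* unsynchronized check-then-act: both threads can pass the
	 * check before either one sets the flag */
	if (!shared.nat_initialized) {
		shared.setup_count++;	/* "allocate NAT extension" */
		shared.nat_initialized = 1;
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, packet_path, NULL);
	pthread_create(&b, NULL, packet_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* can print 2 on an unlucky interleaving; doing the setup once
	 * at creation removes the window entirely */
	printf("setup ran %d time(s)\n", shared.setup_count);
	return 0;
}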
@@ -432,15 +432,15 @@ nf_nat_setup_info(struct nf_conn *ct,
 }
 EXPORT_SYMBOL(nf_nat_setup_info);

-unsigned int
-nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+static unsigned int
+__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
 {
 	/* Force range to this IP; let proto decide mapping for
 	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
 	 * Use reply in case it's already been mangled (eg local packet).
 	 */
 	union nf_inet_addr ip =
-		(HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+		(manip == NF_NAT_MANIP_SRC ?
 		 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
 		 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
 	struct nf_nat_range range = {
@@ -448,7 +448,13 @@ nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 		.min_addr = ip,
 		.max_addr = ip,
 	};
-	return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+	return nf_nat_setup_info(ct, &range, manip);
+}
+
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
 }
 EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
@@ -702,9 +708,9 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {

 static int
 nfnetlink_parse_nat(const struct nlattr *nat,
-		    const struct nf_conn *ct, struct nf_nat_range *range)
+		    const struct nf_conn *ct, struct nf_nat_range *range,
+		    const struct nf_nat_l3proto *l3proto)
 {
-	const struct nf_nat_l3proto *l3proto;
 	struct nlattr *tb[CTA_NAT_MAX+1];
 	int err;
@@ -714,38 +720,46 @@ nfnetlink_parse_nat(const struct nlattr *nat,
 	if (err < 0)
 		return err;

-	rcu_read_lock();
-	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
-	if (l3proto == NULL) {
-		err = -EAGAIN;
-		goto out;
-	}
 	err = l3proto->nlattr_to_range(tb, range);
 	if (err < 0)
-		goto out;
+		return err;

 	if (!tb[CTA_NAT_PROTO])
-		goto out;
+		return 0;

-	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-out:
-	rcu_read_unlock();
-	return err;
+	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
 }

+/* This function is called under rcu_read_lock() */
 static int
 nfnetlink_parse_nat_setup(struct nf_conn *ct,
 			  enum nf_nat_manip_type manip,
 			  const struct nlattr *attr)
 {
 	struct nf_nat_range range;
+	const struct nf_nat_l3proto *l3proto;
 	int err;

-	err = nfnetlink_parse_nat(attr, ct, &range);
+	/* Should not happen, restricted to creating new conntracks
+	 * via ctnetlink.
+	 */
+	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
+		return -EEXIST;
+
+	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
+	 * attach the null binding, otherwise this may oops.
+	 */
+	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+	if (l3proto == NULL)
+		return -EAGAIN;
+
+	/* No NAT information has been passed, allocate the null-binding */
+	if (attr == NULL)
+		return __nf_nat_alloc_null_binding(ct, manip);
+
+	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
 	if (err < 0)
 		return err;

-	if (nf_nat_initialized(ct, manip))
-		return -EEXIST;
-
 	return nf_nat_setup_info(ct, &range, manip);
 }
......
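A null binding pins the NAT range to the address the connection already uses, so nf_nat_setup_info() marks the conntrack as NAT-initialized without actually rewriting anything. A simplified user-space model of the address selection in __nf_nat_alloc_null_binding() (types and addresses are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

enum nat_manip { MANIP_SRC, MANIP_DST };

struct tuple { uint32_t src, dst; };

struct conn {
	struct tuple reply;	/* reply-direction tuple */
};

struct nat_range { uint32_t min_addr, max_addr; };

/* mirror of the logic above: force the range to the address already in
 * use, taken from the reply tuple in case it was already mangled */
static struct nat_range null_binding(const struct conn *ct, enum nat_manip manip)
{
	uint32_t ip = (manip == MANIP_SRC) ? ct->reply.dst : ct->reply.src;

	return (struct nat_range){ .min_addr = ip, .max_addr = ip };
}

int main(void)
{
	struct conn ct = { .reply = { .src = 0x0a000001, .dst = 0xc0a80001 } };
	struct nat_range r = null_binding(&ct, MANIP_SRC);

	/* a one-address range: NAT is "set up" but changes nothing */
	printf("range %#x-%#x\n", (unsigned)r.min_addr, (unsigned)r.max_addr);
	return 0;
}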
@@ -116,7 +116,7 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 			       skb->sk->sk_socket->file->f_cred->fsgid);
 		read_unlock_bh(&skb->sk->sk_callback_lock);
 		break;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	case NFT_META_RTCLASSID: {
 		const struct dst_entry *dst = skb_dst(skb);
@@ -199,7 +199,7 @@ static int nft_meta_init_validate_get(uint32_t key)
 	case NFT_META_OIFTYPE:
 	case NFT_META_SKUID:
 	case NFT_META_SKGID:
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	case NFT_META_RTCLASSID:
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
......
@@ -135,7 +135,8 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
 	if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
 		return ERR_PTR(-EINVAL);

-	if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+	    base != NFT_PAYLOAD_LL_HEADER)
 		return &nft_payload_fast_ops;
 	else
 		return &nft_payload_ops;
......
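The fast payload path copies exactly 1, 2 or 4 bytes, so a non-power-of-2 length such as 3 would silently degrade to a 1-byte copy; that is what the added is_power_of_2() test prevents. A user-space sketch of the dispatch (simplified; the real code lives in nft_payload_fast_eval()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* mirrors the 1/2/4-byte cases of the fast path; returns bytes copied */
static unsigned int fast_copy(void *dest, const void *src, unsigned int len)
{
	if (len == 2) {
		memcpy(dest, src, 2);
		return 2;
	} else if (len == 4) {
		memcpy(dest, src, 4);
		return 4;
	}
	memcpy(dest, src, 1);	/* len == 3 would land here: wrong data */
	return 1;
}

int main(void)
{
	uint8_t pkt[4] = { 0xaa, 0xbb, 0xcc, 0xdd }, out[4] = { 0 };
	unsigned int len = 3;

	if (!is_power_of_2(len))
		printf("len=%u: fixed code takes the slow path\n", len);
	printf("fast path would have copied %u byte(s)\n",
	       fast_copy(out, pkt, len));
	return 0;
}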
@@ -21,9 +21,9 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
 {
 	switch (pkt->ops->pf) {
 	case NFPROTO_IPV4:
-		nft_reject_ipv4_eval(expr, data, pkt);
+		return nft_reject_ipv4_eval(expr, data, pkt);
 	case NFPROTO_IPV6:
-		nft_reject_ipv6_eval(expr, data, pkt);
+		return nft_reject_ipv6_eval(expr, data, pkt);
 	}
 }
......
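Without a break or return, the NFPROTO_IPV4 case falls through into the NFPROTO_IPV6 one, so IPv4 packets were also run through the IPv6 routine. A minimal stand-alone illustration with hypothetical handlers (returning a void-returning call, as the fix does, is a GNU C idiom the kernel uses; gcc and clang accept it):

#include <stdio.h>

static void handle_v4(void) { puts("ipv4 handler"); }
static void handle_v6(void) { puts("ipv6 handler"); }

static void eval_buggy(int pf)
{
	switch (pf) {
	case 4:
		handle_v4();	/* missing break: falls through */
	case 6:
		handle_v6();	/* runs for IPv4 packets too */
	}
}

static void eval_fixed(int pf)
{
	switch (pf) {
	case 4:
		return handle_v4();	/* return stops the fall-through */
	case 6:
		return handle_v6();
	}
}

int main(void)
{
	puts("buggy, pf=4:");
	eval_buggy(4);
	puts("fixed, pf=4:");
	eval_fixed(4);
	return 0;
}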