Commit 7da538c1 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Wait for rcu grace period after releasing netns in ctnetlink,
   from Florian Westphal.

2) Incorrect command type in flowtable offload ndo invocation,
   from wenxu.

3) Incorrect callback type in flowtable offload flow tuple
   updates, also from wenxu.

4) Fix compile warning on flowtable offload infrastructure due to
   possible reference to uninitialized variable, from Nathan Chancellor.

5) Do not inline nf_ct_resolve_clash(), this is called from slow
   path / stress situations. From Florian Westphal.

6) Missing IPv6 flow selector description in flowtable offload.

7) Missing check for NETDEV_UNREGISTER in nf_tables offload
   infrastructure, from wenxu.

8) Update NAT selftest to use randomized netns names, from
   Florian Westphal.

9) Restore nfqueue bridge support, from Marco Oliverio.

10) Compilation warning in SCTP_CHUNKMAP_*() on xt_sctp header.
    From Phil Sutter. (See the sketch below the list.)

11) Fix bogus lookup/get match for non-anonymous rbtree sets.

12) Missing netlink validation for NFT_SET_ELEM_INTERVAL_END
    elements.

13) Missing netlink validation for NFT_DATA_VALUE after
    nft_data_init().

14) If a rule specifies no actions, the offload infrastructure
    returns EOPNOTSUPP.

15) Module refcount leak in object updates.

16) Missing sanitization for ARP traffic from br_netfilter, from
    Eric Dumazet.

17) Compilation breakage on big-endian due to incorrect memcpy()
    size in the flowtable offload infrastructure.
====================
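
The warning fixed in (10) comes from left-shifting the signed constant 1
into the sign bit. A minimal user-space sketch of the same pattern follows;
it sits outside the kernel tree, and the helper name is illustrative only:

  #include <stdint.h>
  #include <stdio.h>

  /* Reduced form of the SCTP_CHUNKMAP_SET() pattern: SCTP chunk types go
   * up to 255, so the shift amount can be 31, and "1 << 31" on a 32-bit
   * int is undefined behaviour; "1u" keeps the shift in unsigned
   * arithmetic. */
  #define bytes(type) (sizeof(type) * 8)

  static void chunkmap_set(uint32_t *chunkmap, unsigned int type)
  {
          chunkmap[type / bytes(uint32_t)] |= 1u << (type % bytes(uint32_t));
  }

  int main(void)
  {
          uint32_t map[8] = { 0 };

          chunkmap_set(map, 255);        /* sets bit 31 of map[7] without UB */
          printf("map[7] = 0x%08x\n", map[7]);
          return 0;
  }
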
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f8fc57e8 7acd9378
@@ -41,19 +41,19 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] |= \
1 << (type % bytes(__u32)); \
1u << (type % bytes(__u32)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] &= \
~(1 << (type % bytes(__u32))); \
~(1u << (type % bytes(__u32))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
((chunkmap)[type / bytes (__u32)] & \
(1 << (type % bytes (__u32)))) ? 1: 0; \
(1u << (type % bytes (__u32)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \
@@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv,
nf_bridge_pull_encap_header(skb);
}
if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
return NF_DROP;
if (arp_hdr(skb)->ar_pln != 4) {
if (is_vlan_arp(skb, state->net))
nf_bridge_push_encap_header(skb);
@@ -895,9 +895,10 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
}
/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conntrack_tuple_hash *h)
static __cold noinline int
nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conntrack_tuple_hash *h)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
@@ -3626,6 +3626,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
list_for_each_entry(net, net_exit_list, exit_list)
ctnetlink_net_exit(net);
/* wait for other cpus until they are done with ctnl_notifiers */
synchronize_rcu();
}
static struct pernet_operations ctnetlink_net_ops = {
@@ -28,6 +28,7 @@ struct nf_flow_key {
struct flow_dissector_key_basic basic;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
};
struct flow_dissector_key_tcp tcp;
struct flow_dissector_key_ports tp;
@@ -57,6 +58,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
@@ -69,9 +71,18 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
key->ipv4.dst = tuple->dst_v4.s_addr;
mask->ipv4.dst = 0xffffffff;
break;
case AF_INET6:
key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
key->basic.n_proto = htons(ETH_P_IPV6);
key->ipv6.src = tuple->src_v6;
memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
key->ipv6.dst = tuple->dst_v6;
memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
break;
default:
return -EOPNOTSUPP;
}
match->dissector.used_keys |= BIT(key->control.addr_type);
mask->basic.n_proto = 0xffff;
switch (tuple->l4proto) {
@@ -96,14 +107,13 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS);
return 0;
}
static void flow_offload_mangle(struct flow_action_entry *entry,
enum flow_action_mangle_base htype,
u32 offset, u8 *value, u8 *mask)
enum flow_action_mangle_base htype, u32 offset,
const __be32 *value, const __be32 *mask)
{
entry->id = FLOW_ACTION_MANGLE;
entry->mangle.htype = htype;
@@ -140,12 +150,12 @@ static int flow_offload_eth_src(struct net *net,
memcpy(&val16, dev->dev_addr, 2);
val = val16 << 16;
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
(u8 *)&val, (u8 *)&mask);
&val, &mask);
mask = ~0xffffffff;
memcpy(&val, dev->dev_addr + 2, 4);
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
(u8 *)&val, (u8 *)&mask);
&val, &mask);
dev_put(dev);
return 0;
@@ -170,13 +180,13 @@ static int flow_offload_eth_dst(struct net *net,
mask = ~0xffffffff;
memcpy(&val, n->ha, 4);
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
(u8 *)&val, (u8 *)&mask);
&val, &mask);
mask = ~0x0000ffff;
memcpy(&val16, n->ha + 4, 2);
val = val16;
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
(u8 *)&val, (u8 *)&mask);
&val, &mask);
neigh_release(n);
return 0;
@@ -206,7 +216,7 @@ static void flow_offload_ipv4_snat(struct net *net,
}
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
(u8 *)&addr, (u8 *)&mask);
&addr, &mask);
}
static void flow_offload_ipv4_dnat(struct net *net,
@@ -233,12 +243,12 @@ static void flow_offload_ipv4_dnat(struct net *net,
}
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
(u8 *)&addr, (u8 *)&mask);
&addr, &mask);
}
static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
unsigned int offset,
u8 *addr, u8 *mask)
const __be32 *addr, const __be32 *mask)
{
struct flow_action_entry *entry;
int i;
@@ -246,8 +256,7 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
entry = flow_action_entry_next(flow_rule);
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
offset + i,
&addr[i], mask);
offset + i, &addr[i], mask);
}
}
@@ -257,23 +266,23 @@ static void flow_offload_ipv6_snat(struct net *net,
struct nf_flow_rule *flow_rule)
{
u32 mask = ~htonl(0xffffffff);
const u8 *addr;
const __be32 *addr;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr;
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, saddr);
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr;
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, daddr);
break;
default:
return;
}
flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static void flow_offload_ipv6_dnat(struct net *net,
@@ -282,23 +291,23 @@ static void flow_offload_ipv6_dnat(struct net *net,
struct nf_flow_rule *flow_rule)
{
u32 mask = ~htonl(0xffffffff);
const u8 *addr;
const __be32 *addr;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr;
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, daddr);
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr;
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, saddr);
break;
default:
return;
}
flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static int flow_offload_l4proto(const struct flow_offload *flow)
@@ -326,25 +335,24 @@ static void flow_offload_port_snat(struct net *net,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffff0000);
__be16 port;
u32 mask = ~htonl(0xffff0000), port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
offset = 0; /* offsetof(struct tcphdr, source); */
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
offset = 0; /* offsetof(struct tcphdr, dest); */
break;
default:
break;
return;
}
port = htonl(port << 16);
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
(u8 *)&port, (u8 *)&mask);
&port, &mask);
}
static void flow_offload_port_dnat(struct net *net,
@@ -353,25 +361,24 @@ static void flow_offload_port_dnat(struct net *net,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffff);
__be16 port;
u32 mask = ~htonl(0xffff), port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
offset = 0; /* offsetof(struct tcphdr, source); */
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
offset = 0; /* offsetof(struct tcphdr, dest); */
break;
default:
break;
return;
}
port = htonl(port);
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
(u8 *)&port, (u8 *)&mask);
&port, &mask);
}
static void flow_offload_ipv4_checksum(struct net *net,
@@ -574,7 +581,7 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
cls_flow.rule = flow_rule->rule;
list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
err = block_cb->cb(TC_SETUP_FT, &cls_flow,
err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
block_cb->cb_priv);
if (err < 0)
continue;
@@ -599,7 +606,7 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload,
&offload->flow->tuplehash[dir].tuple, &extack);
list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
}
@@ -656,7 +663,7 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload,
&offload->flow->tuplehash[dir].tuple, &extack);
list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
memcpy(stats, &cls_flow.stats, sizeof(*stats));
}
@@ -822,7 +829,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
bo.extack = &extack;
INIT_LIST_HEAD(&bo.cb_list);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
if (err < 0)
return err;
@@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
goto err;
}
if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
if (skb_dst(skb) && !skb_dst_force(skb)) {
status = -ENETDOWN;
goto err;
}
@@ -4519,8 +4519,10 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
err = -EINVAL;
if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
nft_data_release(&elem.key.val, desc.type);
return err;
}
priv = set->ops->get(ctx->net, set, &elem, flags);
if (IS_ERR(priv))
@@ -4756,14 +4758,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
if (nla[NFTA_SET_ELEM_DATA] != NULL &&
flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
}
if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
(nla[NFTA_SET_ELEM_DATA] ||
nla[NFTA_SET_ELEM_OBJREF] ||
nla[NFTA_SET_ELEM_TIMEOUT] ||
nla[NFTA_SET_ELEM_EXPIRATION] ||
nla[NFTA_SET_ELEM_USERDATA] ||
nla[NFTA_SET_ELEM_EXPR]))
return -EINVAL;
timeout = 0;
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
@@ -5476,7 +5484,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
type = nft_obj_type_get(net, objtype);
type = __nft_obj_type_get(objtype);
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
@@ -44,6 +44,9 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
expr = nft_expr_next(expr);
}
if (num_actions == 0)
return ERR_PTR(-EOPNOTSUPP);
flow = nft_flow_rule_alloc(num_actions);
if (!flow)
return ERR_PTR(-ENOMEM);
@@ -577,6 +580,9 @@ static int nft_offload_netdev_event(struct notifier_block *this,
struct net *net = dev_net(dev);
struct nft_chain *chain;
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
mutex_lock(&net->nft.commit_mutex);
chain = __nft_offload_get_chain(dev);
if (chain)
@@ -80,7 +80,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
tb[NFTA_BITWISE_MASK]);
if (err < 0)
return err;
if (d1.len != priv->len) {
if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) {
err = -EINVAL;
goto err1;
}
......@@ -89,7 +89,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
tb[NFTA_BITWISE_XOR]);
if (err < 0)
goto err1;
if (d2.len != priv->len) {
if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) {
err = -EINVAL;
goto err2;
}
@@ -81,6 +81,12 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (err < 0)
return err;
if (desc.type != NFT_DATA_VALUE) {
err = -EINVAL;
nft_data_release(&priv->data, desc.type);
return err;
}
priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
err = nft_validate_register_load(priv->sreg, desc.len);
if (err < 0)
@@ -66,11 +66,21 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
if (err < 0)
return err;
if (desc_from.type != NFT_DATA_VALUE) {
err = -EINVAL;
goto err1;
}
err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to),
&desc_to, tb[NFTA_RANGE_TO_DATA]);
if (err < 0)
goto err1;
if (desc_to.type != NFT_DATA_VALUE) {
err = -EINVAL;
goto err2;
}
if (desc_from.len != desc_to.len) {
err = -EINVAL;
goto err2;
@@ -74,8 +74,13 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
parent = rcu_dereference_raw(parent->rb_left);
continue;
}
if (nft_rbtree_interval_end(rbe))
goto out;
if (nft_rbtree_interval_end(rbe)) {
if (nft_set_is_anonymous(set))
return false;
parent = rcu_dereference_raw(parent->rb_left);
interval = NULL;
continue;
}
*ext = &rbe->ext;
return true;
@@ -88,7 +93,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
*ext = &interval->ext;
return true;
}
out:
return false;
}
@@ -139,8 +144,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
if (flags & NFT_SET_ELEM_INTERVAL_END)
interval = rbe;
} else {
if (!nft_set_elem_active(&rbe->ext, genmask))
if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = rcu_dereference_raw(parent->rb_left);
continue;
}
if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
@@ -148,7 +155,11 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
*elem = rbe;
return true;
}
return false;
if (nft_rbtree_interval_end(rbe))
interval = NULL;
parent = rcu_dereference_raw(parent->rb_left);
}
}