Commit 3e16afd3 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree;
they are:

1) Missing netlink message sanity check in nfnetlink, patch from
   Mateusz Jurczyk.

2) We now have netfilter per-netns hooks, so let's kill the global hook
   infrastructure, which is known to be racy with netns. We don't care
   about out-of-tree modules (a migration sketch for such code follows
   the changelog). Patch from Florian Westphal.

3) find_appropriate_src() is buggy when collisions happen after the
   conversion of the nat bysource table to rhashtable. Also from Florian.

4) Remove the forward chain in the nf_tables arp family; it is useless
   and has been causing quite a bit of confusion. From Florian Westphal.

5) nf_ct_remove_expect() is called with the wrong parameter, causing a
   kernel oops. Patch from Florian Westphal.
====================
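
For out-of-tree code that still uses the removed nf_register_hook()/
nf_unregister_hook() API, the per-netns replacement is
nf_register_net_hook()/nf_unregister_net_hook(), typically driven from
pernet_operations so the hook is installed in every namespace. A minimal
migration sketch; the hook function, ops and pernet names below are
illustrative and not part of this patchset:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
                              const struct nf_hook_state *state)
{
        return NF_ACCEPT;               /* pass every packet through */
}

static struct nf_hook_ops my_ops = {
        .hook           = my_hookfn,
        .pf             = NFPROTO_IPV4,
        .hooknum        = NF_INET_PRE_ROUTING,
        .priority       = NF_IP_PRI_FIRST,
};

/* Register/unregister per network namespace instead of globally. */
static int __net_init my_net_init(struct net *net)
{
        return nf_register_net_hook(net, &my_ops);
}

static void __net_exit my_net_exit(struct net *net)
{
        nf_unregister_net_hook(net, &my_ops);
}

static struct pernet_operations my_net_ops = {
        .init   = my_net_init,
        .exit   = my_net_exit,
};

/* module init/exit would call register_pernet_subsys(&my_net_ops) and
 * unregister_pernet_subsys(&my_net_ops). */

Unlike the removed global API, nothing here walks a shared hook list under
rtnl_lock(); the netns core calls .init/.exit for each namespace as it is
created and torn down.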
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0ddf3fb2 36ac344e
@@ -61,8 +61,6 @@ typedef unsigned int nf_hookfn(void *priv,
                                struct sk_buff *skb,
                                const struct nf_hook_state *state);
 struct nf_hook_ops {
-        struct list_head        list;
-
         /* User fills in from here down. */
         nf_hookfn               *hook;
         struct net_device       *dev;
@@ -160,13 +158,6 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                              unsigned int n);
 
-int nf_register_hook(struct nf_hook_ops *reg);
-void nf_unregister_hook(struct nf_hook_ops *reg);
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-
 /* Functions to register get/setsockopt ranges (non-inclusive). You
    need to check permissions yourself! */
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
...
@@ -72,8 +72,7 @@ static const struct nf_chain_type filter_arp = {
         .family         = NFPROTO_ARP,
         .owner          = THIS_MODULE,
         .hook_mask      = (1 << NF_ARP_IN) |
-                          (1 << NF_ARP_OUT) |
-                          (1 << NF_ARP_FORWARD),
+                          (1 << NF_ARP_OUT),
 };
 
 static int __init nf_tables_arp_init(void)
...
@@ -227,114 +227,6 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 }
 EXPORT_SYMBOL(nf_unregister_net_hooks);
 
-static LIST_HEAD(nf_hook_list);
-
-static int _nf_register_hook(struct nf_hook_ops *reg)
-{
-        struct net *net, *last;
-        int ret;
-
-        for_each_net(net) {
-                ret = nf_register_net_hook(net, reg);
-                if (ret && ret != -ENOENT)
-                        goto rollback;
-        }
-        list_add_tail(&reg->list, &nf_hook_list);
-
-        return 0;
-rollback:
-        last = net;
-        for_each_net(net) {
-                if (net == last)
-                        break;
-                nf_unregister_net_hook(net, reg);
-        }
-        return ret;
-}
-
-int nf_register_hook(struct nf_hook_ops *reg)
-{
-        int ret;
-
-        rtnl_lock();
-        ret = _nf_register_hook(reg);
-        rtnl_unlock();
-
-        return ret;
-}
-EXPORT_SYMBOL(nf_register_hook);
-
-static void _nf_unregister_hook(struct nf_hook_ops *reg)
-{
-        struct net *net;
-
-        list_del(&reg->list);
-        for_each_net(net)
-                nf_unregister_net_hook(net, reg);
-}
-
-void nf_unregister_hook(struct nf_hook_ops *reg)
-{
-        rtnl_lock();
-        _nf_unregister_hook(reg);
-        rtnl_unlock();
-}
-EXPORT_SYMBOL(nf_unregister_hook);
-
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-        unsigned int i;
-        int err = 0;
-
-        for (i = 0; i < n; i++) {
-                err = nf_register_hook(&reg[i]);
-                if (err)
-                        goto err;
-        }
-        return err;
-
-err:
-        if (i > 0)
-                nf_unregister_hooks(reg, i);
-        return err;
-}
-EXPORT_SYMBOL(nf_register_hooks);
-
-/* Caller MUST take rtnl_lock() */
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-        unsigned int i;
-        int err = 0;
-
-        for (i = 0; i < n; i++) {
-                err = _nf_register_hook(&reg[i]);
-                if (err)
-                        goto err;
-        }
-        return err;
-
-err:
-        if (i > 0)
-                _nf_unregister_hooks(reg, i);
-        return err;
-}
-EXPORT_SYMBOL(_nf_register_hooks);
-
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-        while (n-- > 0)
-                nf_unregister_hook(&reg[n]);
-}
-EXPORT_SYMBOL(nf_unregister_hooks);
-
-/* Caller MUST take rtnl_lock */
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-        while (n-- > 0)
-                _nf_unregister_hook(&reg[n]);
-}
-EXPORT_SYMBOL(_nf_unregister_hooks);
-
 /* Returns 1 if okfn() needs to be executed by the caller,
  * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
@@ -450,37 +342,6 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
-static int nf_register_hook_list(struct net *net)
-{
-        struct nf_hook_ops *elem;
-        int ret;
-
-        rtnl_lock();
-        list_for_each_entry(elem, &nf_hook_list, list) {
-                ret = nf_register_net_hook(net, elem);
-                if (ret && ret != -ENOENT)
-                        goto out_undo;
-        }
-        rtnl_unlock();
-        return 0;
-
-out_undo:
-        list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
-                nf_unregister_net_hook(net, elem);
-        rtnl_unlock();
-        return ret;
-}
-
-static void nf_unregister_hook_list(struct net *net)
-{
-        struct nf_hook_ops *elem;
-
-        rtnl_lock();
-        list_for_each_entry(elem, &nf_hook_list, list)
-                nf_unregister_net_hook(net, elem);
-        rtnl_unlock();
-}
-
 static int __net_init netfilter_net_init(struct net *net)
 {
         int i, h, ret;
@@ -500,16 +361,12 @@ static int __net_init netfilter_net_init(struct net *net)
                 return -ENOMEM;
         }
 #endif
-        ret = nf_register_hook_list(net);
-        if (ret)
-                remove_proc_entry("netfilter", net->proc_net);
 
         return ret;
 }
 
 static void __net_exit netfilter_net_exit(struct net *net)
 {
-        nf_unregister_hook_list(net);
         remove_proc_entry("netfilter", net->proc_net);
 }
...
@@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
         h = nf_ct_expect_dst_hash(net, &expect->tuple);
         hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                 if (expect_matches(i, expect)) {
-                        if (nf_ct_remove_expect(expect))
+                        if (nf_ct_remove_expect(i))
                                 break;
                 } else if (expect_clash(i, expect)) {
                         ret = -EBUSY;
...
@@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
                 .tuple = tuple,
                 .zone = zone
         };
-        struct rhlist_head *hl;
+        struct rhlist_head *hl, *h;
 
         hl = rhltable_lookup(&nf_nat_bysource_table, &key,
                              nf_nat_bysource_params);
-        if (!hl)
-                return 0;
 
-        ct = container_of(hl, typeof(*ct), nat_bysource);
+        rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
+                nf_ct_invert_tuplepr(result,
+                                     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                result->dst = tuple->dst;
 
-        nf_ct_invert_tuplepr(result,
-                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-        result->dst = tuple->dst;
+                if (in_range(l3proto, l4proto, result, range))
+                        return 1;
+        }
 
-        return in_range(l3proto, l4proto, result, range);
+        return 0;
 }
 
 /* For [FUTURE] fragmentation handling, we want the least-used
...
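
The hunk above is the fix for #3: nf_nat_bysource is an rhltable, so
several conntrack entries can share one source-tuple key and the lookup
has to walk the whole collision list instead of trusting the first entry
(rhl_for_each_entry_rcu() also copes with a NULL head, which is why the
!hl check could go). A standalone sketch of that pattern; the entry type
and match callback are hypothetical, only the rhashtable helpers are real
API, and the caller is assumed to hold rcu_read_lock():

#include <linux/rhashtable.h>

/* Hypothetical value type: several entries may map to the same key. */
struct src_entry {
        struct rhlist_head      node;   /* links entries sharing a key */
        u32                     cookie;
};

static struct src_entry *
lookup_first_match(struct rhltable *tbl, const void *key,
                   const struct rhashtable_params params,
                   bool (*match)(const struct src_entry *e))
{
        struct rhlist_head *list, *pos;
        struct src_entry *e;

        /* Head of the collision list for @key (may be NULL). */
        list = rhltable_lookup(tbl, key, params);

        /* Visit every entry on the list, not just the head. */
        rhl_for_each_entry_rcu(e, pos, list, node) {
                if (match(e))
                        return e;
        }
        return NULL;
}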
@@ -472,8 +472,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
         if (msglen > skb->len)
                 msglen = skb->len;
 
-        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
-            skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+        if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
                 return;
 
         err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
@@ -500,7 +499,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 {
         struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+        if (skb->len < NLMSG_HDRLEN ||
+            nlh->nlmsg_len < NLMSG_HDRLEN ||
             skb->len < nlh->nlmsg_len)
                 return;
...
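
Both nfnetlink hunks belong to fix #1: skb->len has to be checked before
anything in the netlink header is trusted, because nlmsg_len itself sits
in sender-controlled data and the skb may not even hold a complete
nlmsghdr. A sketch of the resulting check order; the helper name is made
up, only nlmsg_hdr() and NLMSG_HDRLEN are real:

#include <linux/netlink.h>
#include <linux/skbuff.h>

/* Hypothetical helper mirroring the order of checks after the fix. */
static bool nl_msg_len_sane(struct sk_buff *skb)
{
        struct nlmsghdr *nlh;

        if (skb->len < NLMSG_HDRLEN)            /* header itself truncated */
                return false;

        nlh = nlmsg_hdr(skb);                   /* safe to look at now */

        if (nlh->nlmsg_len < NLMSG_HDRLEN ||    /* malformed length field */
            skb->len < nlh->nlmsg_len)          /* claims more than we got */
                return false;

        return true;
}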