Commit 45040978 authored by Jozsef Kadlecsik

netfilter: ipset: Fix set:list type crash when flush/dump set in parallel

Flushing/listing entries was not RCU safe, so a parallel flush/dump
could lead to a kernel crash. Bug reported by Deniz Eren.

Fixes netfilter bugzilla id #1050.
Signed-off-by: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
parent 5cc6ce9f
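
The crash comes from freeing list:set elements while a parallel dump may still be walking the list; the patch below defers the per-element cleanup to an RCU callback and makes set destruction wait for those callbacks. Purely as an illustration of that idiom, and using made-up foo_elem/foo_set names rather than the real ipset structures, a kernel-style sketch of the pattern might look like this:

/* Sketch only: the deferred-free pattern this patch adopts.
 * foo_elem/foo_set are hypothetical stand-ins for set_elem/list_set.
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_set {
        struct list_head members;       /* RCU-protected list */
};

struct foo_elem {
        struct rcu_head rcu;            /* for call_rcu() */
        struct list_head list;
        struct foo_set *set;            /* back-pointer for the RCU callback */
};

/* Runs after a grace period, when no RCU reader can still see the node. */
static void foo_elem_free_rcu(struct rcu_head *rcu)
{
        struct foo_elem *e = container_of(rcu, struct foo_elem, rcu);

        /* drop any references accounted to e->set here, then free */
        kfree(e);
}

/* Called with updates serialized (the nfnl mutex in the ipset case). */
static void foo_elem_del(struct foo_elem *e)
{
        list_del_rcu(&e->list);                 /* readers may still reach e */
        call_rcu(&e->rcu, foo_elem_free_rcu);   /* defer the actual kfree() */
}

/* Teardown must wait for every pending foo_elem_free_rcu() callback,
 * which is what the rcu_barrier() added to ip_set_destroy() achieves.
 */
static void foo_set_destroy(struct foo_set *set)
{
        rcu_barrier();
        WARN_ON(!list_empty(&set->members));
        kfree(set);
}

Note that rcu_barrier() is needed rather than synchronize_rcu(): flushing a list:set only queues the per-element cleanup via call_rcu(), so destroy has to wait for those callbacks to finish, not merely for readers to leave their critical sections.
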
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -985,6 +985,9 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
         if (unlikely(protocol_failed(attr)))
                 return -IPSET_ERR_PROTOCOL;
 
+        /* Must wait for flush to be really finished in list:set */
+        rcu_barrier();
+
         /* Commands are serialized and references are
          * protected by the ip_set_ref_lock.
          * External systems (i.e. xt_set) must call
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -30,6 +30,7 @@ MODULE_ALIAS("ip_set_list:set");
 struct set_elem {
         struct rcu_head rcu;
         struct list_head list;
+        struct ip_set *set;     /* Sigh, in order to cleanup reference */
         ip_set_id_t id;
 } __aligned(__alignof__(u64));
@@ -151,30 +152,29 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
 /* Userspace interfaces: we are protected by the nfnl mutex */
 
 static void
-__list_set_del(struct ip_set *set, struct set_elem *e)
+__list_set_del_rcu(struct rcu_head * rcu)
 {
+        struct set_elem *e = container_of(rcu, struct set_elem, rcu);
+        struct ip_set *set = e->set;
         struct list_set *map = set->data;
 
         ip_set_put_byindex(map->net, e->id);
-        /* We may call it, because we don't have a to be destroyed
-         * extension which is used by the kernel.
-         */
         ip_set_ext_destroy(set, e);
-        kfree_rcu(e, rcu);
+        kfree(e);
 }
 
 static inline void
 list_set_del(struct ip_set *set, struct set_elem *e)
 {
         list_del_rcu(&e->list);
-        __list_set_del(set, e);
+        call_rcu(&e->rcu, __list_set_del_rcu);
 }
 
 static inline void
-list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
+list_set_replace(struct set_elem *e, struct set_elem *old)
 {
         list_replace_rcu(&old->list, &e->list);
-        __list_set_del(set, old);
+        call_rcu(&old->rcu, __list_set_del_rcu);
 }
 
 static void
@@ -244,9 +244,6 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
         struct set_elem *e, *n, *prev, *next;
         bool flag_exist = flags & IPSET_FLAG_EXIST;
 
-        if (SET_WITH_TIMEOUT(set))
-                set_cleanup_entries(set);
-
         /* Find where to add the new entry */
         n = prev = next = NULL;
         list_for_each_entry(e, &map->members, list) {
@@ -301,10 +298,11 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
         if (!e)
                 return -ENOMEM;
         e->id = d->id;
+        e->set = set;
         INIT_LIST_HEAD(&e->list);
         list_set_init_extensions(set, ext, e);
         if (n)
-                list_set_replace(set, e, n);
+                list_set_replace(e, n);
         else if (next)
                 list_add_tail_rcu(&e->list, &next->list);
         else if (prev)
@@ -431,6 +429,7 @@ list_set_destroy(struct ip_set *set)
 
         if (SET_WITH_TIMEOUT(set))
                 del_timer_sync(&map->gc);
+
         list_for_each_entry_safe(e, n, &map->members, list) {
                 list_del(&e->list);
                 ip_set_put_byindex(map->net, e->id);
@@ -450,8 +449,10 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
         struct set_elem *e;
         u32 n = 0;
 
-        list_for_each_entry(e, &map->members, list)
+        rcu_read_lock();
+        list_for_each_entry_rcu(e, &map->members, list)
                 n++;
+        rcu_read_unlock();
 
         nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
         if (!nested)
@@ -483,33 +484,25 @@ list_set_list(const struct ip_set *set,
         atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
         if (!atd)
                 return -EMSGSIZE;
-        list_for_each_entry(e, &map->members, list) {
-                if (i == first)
-                        break;
-                i++;
-        }
 
         rcu_read_lock();
-        list_for_each_entry_from(e, &map->members, list) {
-                i++;
-                if (SET_WITH_TIMEOUT(set) &&
-                    ip_set_timeout_expired(ext_timeout(e, set)))
+        list_for_each_entry_rcu(e, &map->members, list) {
+                if (i < first ||
+                    (SET_WITH_TIMEOUT(set) &&
+                     ip_set_timeout_expired(ext_timeout(e, set)))) {
+                        i++;
                         continue;
-                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
-                if (!nested) {
-                        if (i == first) {
-                                nla_nest_cancel(skb, atd);
-                                ret = -EMSGSIZE;
-                                goto out;
-                        }
-                        goto nla_put_failure;
                 }
+                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+                if (!nested)
+                        goto nla_put_failure;
                 if (nla_put_string(skb, IPSET_ATTR_NAME,
                                    ip_set_name_byindex(map->net, e->id)))
                         goto nla_put_failure;
                 if (ip_set_put_extensions(skb, set, e, true))
                         goto nla_put_failure;
                 ipset_nest_end(skb, nested);
+                i++;
         }
 
         ipset_nest_end(skb, atd);
@@ -520,10 +513,12 @@ list_set_list(const struct ip_set *set,
 nla_put_failure:
         nla_nest_cancel(skb, nested);
         if (unlikely(i == first)) {
+                nla_nest_cancel(skb, atd);
                 cb->args[IPSET_CB_ARG0] = 0;
                 ret = -EMSGSIZE;
+        } else {
+                cb->args[IPSET_CB_ARG0] = i;
         }
-        cb->args[IPSET_CB_ARG0] = i - 1;
         ipset_nest_end(skb, atd);
 out:
         rcu_read_unlock();
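
The dump-side half of the fix is visible in list_set_list() above: instead of first walking the list outside RCU protection to find the resume point, the whole walk now happens under rcu_read_lock() with list_for_each_entry_rcu(), and already-dumped entries are skipped by index. A sketch of that resumable iteration, continuing the hypothetical foo_* names from the earlier sketch rather than the real ipset code:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Emit entries starting at index 'first'; return the index to resume from.
 * Because the cursor is an index rather than a pointer into the list, a
 * parallel flush that frees elements between dump calls cannot leave the
 * dumper holding a dangling resume pointer.
 */
static u32 foo_set_dump(struct foo_set *set, u32 first)
{
        struct foo_elem *e;
        u32 i = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &set->members, list) {
                if (i < first) {        /* already emitted in an earlier call */
                        i++;
                        continue;
                }
                /* ... emit e into the netlink message here ... */
                i++;
        }
        rcu_read_unlock();
        return i;               /* next call passes this back as 'first' */
}
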