Commit ec3ed293 authored by Vlad Buslov, committed by David S. Miller

net_sched: change tcf_del_walker() to take idrinfo->lock

The action API was changed to work with actions and action_idr in a
concurrency-safe manner; however, tcf_del_walker() still uses actions
without taking a reference or idrinfo->lock first, and deletes them
directly, disregarding possible concurrent deletes.

Change tcf_del_walker() to take idrinfo->lock while iterating over
actions and use the new tcf_idr_release_unsafe() to release them while
holding the lock; a user-space sketch of this pattern follows the
act_api.c diff below.

Also, the blocking function fl_hw_destroy_tmplt() can be called when we
put a filter chain, so defer it to a work queue; a sketch of the
deferral pattern follows the cls_flower.c diff below.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
[xiyou.wangcong@gmail.com: heavily modify the code and changelog]
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 947e326c
net/sched/act_api.c

@@ -246,6 +246,20 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	goto done;
 }
 
+static int tcf_idr_release_unsafe(struct tc_action *p)
+{
+	if (atomic_read(&p->tcfa_bindcnt) > 0)
+		return -EPERM;
+
+	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
+		tcf_action_cleanup(p);
+		return ACT_P_DELETED;
+	}
+
+	return 0;
+}
+
 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 			  const struct tc_action_ops *ops)
 {
@@ -262,15 +276,19 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	if (nla_put_string(skb, TCA_KIND, ops->kind))
 		goto nla_put_failure;
 
+	spin_lock(&idrinfo->lock);
 	idr_for_each_entry_ul(idr, p, id) {
-		ret = __tcf_idr_release(p, false, true);
+		ret = tcf_idr_release_unsafe(p);
 		if (ret == ACT_P_DELETED) {
 			module_put(ops->owner);
 			n_i++;
 		} else if (ret < 0) {
+			spin_unlock(&idrinfo->lock);
 			goto nla_put_failure;
 		}
 	}
+	spin_unlock(&idrinfo->lock);
+
 	if (nla_put_u32(skb, TCA_FCNT, n_i))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
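The act_api.c change follows a common kernel pattern: hold the table lock
across the entire walk and drop each entry's reference while still holding
it, so a concurrent deleter can never observe a half-released action. Below
is a minimal user-space sketch of that pattern, assuming pthreads and C11
atomics; it is an illustrative analogue, not kernel code, and every name in
it (release_unsafe, del_walker, and so on) is hypothetical.

/* User-space analogue of the tcf_del_walker() fix: iterate a table
 * and release entries while holding the table lock.  All names are
 * illustrative, not kernel API.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 8

struct entry {
	atomic_int refcnt;	/* stands in for tcfa_refcnt */
	atomic_int bindcnt;	/* stands in for tcfa_bindcnt */
	int index;
};

static struct entry *table[TABLE_SIZE];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold table_lock -- mirrors tcf_idr_release_unsafe(). */
static int release_unsafe(struct entry *e)
{
	if (atomic_load(&e->bindcnt) > 0)
		return -1;		/* still bound, refuse to delete */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
		table[e->index] = NULL;	/* unlink under the lock */
		free(e);
		return 1;		/* deleted */
	}
	return 0;			/* someone else still holds a ref */
}

/* Mirrors the fixed tcf_del_walker(): the lock covers the whole walk,
 * so a concurrent deleter never races against our release.
 */
static int del_walker(void)
{
	int i, ret, n_deleted = 0;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < TABLE_SIZE; i++) {
		if (!table[i])
			continue;
		ret = release_unsafe(table[i]);
		if (ret == 1) {
			n_deleted++;
		} else if (ret < 0) {
			/* the kernel unlocks and bails out here too */
			pthread_mutex_unlock(&table_lock);
			return -1;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return n_deleted;
}

int main(void)
{
	for (int i = 0; i < TABLE_SIZE; i++) {
		struct entry *e = calloc(1, sizeof(*e));
		atomic_init(&e->refcnt, 1);
		atomic_init(&e->bindcnt, 0);
		e->index = i;
		table[i] = e;
	}
	printf("deleted %d entries\n", del_walker());
	return 0;
}

Note how release_unsafe() mirrors tcf_idr_release_unsafe(): it assumes the
caller already holds the table lock, which is exactly what the _unsafe
suffix in the kernel function signals.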
net/sched/cls_flower.c

@@ -79,6 +79,7 @@ struct fl_flow_tmplt {
 	struct fl_flow_key mask;
 	struct flow_dissector dissector;
 	struct tcf_chain *chain;
+	struct rcu_work rwork;
 };
 
 struct cls_fl_head {
@@ -1437,14 +1438,22 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
 	return ERR_PTR(err);
 }
 
-static void fl_tmplt_destroy(void *tmplt_priv)
+static void fl_tmplt_destroy_work(struct work_struct *work)
 {
-	struct fl_flow_tmplt *tmplt = tmplt_priv;
+	struct fl_flow_tmplt *tmplt = container_of(to_rcu_work(work),
+						   struct fl_flow_tmplt, rwork);
 
 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
 	kfree(tmplt);
 }
 
+static void fl_tmplt_destroy(void *tmplt_priv)
+{
+	struct fl_flow_tmplt *tmplt = tmplt_priv;
+
+	tcf_queue_work(&tmplt->rwork, fl_tmplt_destroy_work);
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
 			   void *val, int val_type,
 			   void *mask, int mask_type, int len)
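fl_hw_destroy_tmplt() may block because it calls down into driver offload
code, yet fl_tmplt_destroy() can run in a context where blocking is not
allowed, so the commit queues the real teardown through tcf_queue_work().
A rough user-space analogue of that deferral is sketched below, with a
single worker thread standing in for the kernel workqueue and the RCU
grace-period semantics of rcu_work deliberately omitted; all names are
hypothetical, not kernel API.

/* User-space analogue of deferring a blocking destructor to a work
 * queue, as fl_tmplt_destroy() now does.  Illustration only.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct work {
	void (*func)(struct work *w);
	struct work *next;
};

struct tmplt {
	int id;
	struct work rwork;	/* embedded, like rcu_work in fl_flow_tmplt */
};

static struct work *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;

/* container_of in miniature: recover the tmplt from its embedded work. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void queue_work(struct work *w, void (*func)(struct work *))
{
	w->func = func;
	pthread_mutex_lock(&queue_lock);
	w->next = queue_head;
	queue_head = w;
	pthread_cond_signal(&queue_cond);
	pthread_mutex_unlock(&queue_lock);
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&queue_lock);
		while (!queue_head)
			pthread_cond_wait(&queue_cond, &queue_lock);
		struct work *w = queue_head;
		queue_head = w->next;
		pthread_mutex_unlock(&queue_lock);
		w->func(w);	/* blocking work runs here, not in the caller */
	}
	return NULL;
}

/* Analogue of fl_tmplt_destroy_work(): may block safely. */
static void tmplt_destroy_work(struct work *w)
{
	struct tmplt *t = container_of(w, struct tmplt, rwork);

	usleep(1000);		/* stands in for blocking fl_hw_destroy_tmplt() */
	printf("destroyed tmplt %d\n", t->id);
	free(t);
}

/* Analogue of fl_tmplt_destroy(): never blocks, just queues. */
static void tmplt_destroy(struct tmplt *t)
{
	queue_work(&t->rwork, tmplt_destroy_work);
}

int main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, worker, NULL);

	struct tmplt *t = calloc(1, sizeof(*t));
	t->id = 1;
	tmplt_destroy(t);	/* returns immediately */

	sleep(1);		/* give the worker time to run */
	return 0;
}

The design point mirrors the kernel change: the caller-facing destroy
function only enqueues and therefore never sleeps, while the blocking work
runs later in worker context, and container_of() recovers the enclosing
object from the embedded work item just as fl_tmplt_destroy_work() does
via to_rcu_work().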