Commit 3d81e711 authored by Vlad Buslov, committed by David S. Miller

net: sched: flower: protect flower classifier state with spinlock

struct tcf_proto was extended with spinlock to be used by classifiers
instead of global rtnl lock. Use it to protect shared flower classifier
data structures (handle_idr, mask hashtable and list) and fields of
individual filters that can be accessed concurrently. This patch set uses
tcf_proto->lock as a per-instance lock that protects all filters on a
tcf_proto.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 272ffaad
...@@ -384,7 +384,9 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, ...@@ -384,7 +384,9 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
cls_flower.cookie = (unsigned long) f; cls_flower.cookie = (unsigned long) f;
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false); tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
spin_lock(&tp->lock);
tcf_block_offload_dec(block, &f->flags); tcf_block_offload_dec(block, &f->flags);
spin_unlock(&tp->lock);
} }
static int fl_hw_replace_filter(struct tcf_proto *tp, static int fl_hw_replace_filter(struct tcf_proto *tp,
...@@ -426,7 +428,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, ...@@ -426,7 +428,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
return err; return err;
} else if (err > 0) { } else if (err > 0) {
f->in_hw_count = err; f->in_hw_count = err;
spin_lock(&tp->lock);
tcf_block_offload_inc(block, &f->flags); tcf_block_offload_inc(block, &f->flags);
spin_unlock(&tp->lock);
} }
if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
...@@ -514,14 +518,19 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, ...@@ -514,14 +518,19 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
*last = false; *last = false;
if (f->deleted) spin_lock(&tp->lock);
if (f->deleted) {
spin_unlock(&tp->lock);
return -ENOENT; return -ENOENT;
}
f->deleted = true; f->deleted = true;
rhashtable_remove_fast(&f->mask->ht, &f->ht_node, rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
f->mask->filter_ht_params); f->mask->filter_ht_params);
idr_remove(&head->handle_idr, f->handle); idr_remove(&head->handle_idr, f->handle);
list_del_rcu(&f->list); list_del_rcu(&f->list);
spin_unlock(&tp->lock);
*last = fl_mask_put(head, f->mask, async); *last = fl_mask_put(head, f->mask, async);
if (!tc_skip_hw(f->flags)) if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f, extack); fl_hw_destroy_filter(tp, f, extack);
...@@ -1500,6 +1509,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, ...@@ -1500,6 +1509,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(fnew->flags)) if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
spin_lock(&tp->lock);
/* tp was deleted concurrently. -EAGAIN will cause caller to lookup /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
* proto again or create new one, if necessary. * proto again or create new one, if necessary.
*/ */
...@@ -1530,6 +1541,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, ...@@ -1530,6 +1541,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
list_replace_rcu(&fold->list, &fnew->list); list_replace_rcu(&fold->list, &fnew->list);
fold->deleted = true; fold->deleted = true;
spin_unlock(&tp->lock);
fl_mask_put(head, fold->mask, true); fl_mask_put(head, fold->mask, true);
if (!tc_skip_hw(fold->flags)) if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold, NULL); fl_hw_destroy_filter(tp, fold, NULL);
...@@ -1575,6 +1588,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, ...@@ -1575,6 +1588,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout_idr; goto errout_idr;
list_add_tail_rcu(&fnew->list, &fnew->mask->filters); list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
spin_unlock(&tp->lock);
} }
*arg = fnew; *arg = fnew;
...@@ -1586,6 +1600,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, ...@@ -1586,6 +1600,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
errout_idr: errout_idr:
idr_remove(&head->handle_idr, fnew->handle); idr_remove(&head->handle_idr, fnew->handle);
errout_hw: errout_hw:
spin_unlock(&tp->lock);
if (!tc_skip_hw(fnew->flags)) if (!tc_skip_hw(fnew->flags))
fl_hw_destroy_filter(tp, fnew, NULL); fl_hw_destroy_filter(tp, fnew, NULL);
errout_mask: errout_mask:
...@@ -1688,8 +1703,10 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, ...@@ -1688,8 +1703,10 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
continue; continue;
} }
spin_lock(&tp->lock);
tc_cls_offload_cnt_update(block, &f->in_hw_count, tc_cls_offload_cnt_update(block, &f->in_hw_count,
&f->flags, add); &f->flags, add);
spin_unlock(&tp->lock);
} }
} }
...@@ -2223,6 +2240,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, ...@@ -2223,6 +2240,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct cls_fl_filter *f = fh; struct cls_fl_filter *f = fh;
struct nlattr *nest; struct nlattr *nest;
struct fl_flow_key *key, *mask; struct fl_flow_key *key, *mask;
bool skip_hw;
if (!f) if (!f)
return skb->len; return skb->len;
...@@ -2233,21 +2251,26 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, ...@@ -2233,21 +2251,26 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (!nest) if (!nest)
goto nla_put_failure; goto nla_put_failure;
spin_lock(&tp->lock);
if (f->res.classid && if (f->res.classid &&
nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
goto nla_put_failure; goto nla_put_failure_locked;
key = &f->key; key = &f->key;
mask = &f->mask->key; mask = &f->mask->key;
skip_hw = tc_skip_hw(f->flags);
if (fl_dump_key(skb, net, key, mask)) if (fl_dump_key(skb, net, key, mask))
goto nla_put_failure; goto nla_put_failure_locked;
if (!tc_skip_hw(f->flags))
fl_hw_update_stats(tp, f);
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
goto nla_put_failure; goto nla_put_failure_locked;
spin_unlock(&tp->lock);
if (!skip_hw)
fl_hw_update_stats(tp, f);
if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
goto nla_put_failure; goto nla_put_failure;
...@@ -2262,6 +2285,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, ...@@ -2262,6 +2285,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
return skb->len; return skb->len;
nla_put_failure_locked:
spin_unlock(&tp->lock);
nla_put_failure: nla_put_failure:
nla_nest_cancel(skb, nest); nla_nest_cancel(skb, nest);
return -1; return -1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment