Commit 195c234d authored by Vlad Buslov, committed by David S. Miller

net: sched: flower: handle concurrent mask insertion

Without rtnl lock protection masks with same key can be inserted
concurrently. Insert temporary mask with reference count zero to masks
hashtable. This will cause any concurrent modifications to retry.

Wait for rcu grace period to complete after removing temporary mask from
masks hashtable to accommodate concurrent readers.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Suggested-by: Jiri Pirko <jiri@mellanox.com>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f48ef4d5
...@@ -1304,11 +1304,14 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, ...@@ -1304,11 +1304,14 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
INIT_LIST_HEAD_RCU(&newmask->filters); INIT_LIST_HEAD_RCU(&newmask->filters);
refcount_set(&newmask->refcnt, 1); refcount_set(&newmask->refcnt, 1);
err = rhashtable_insert_fast(&head->ht, &newmask->ht_node, err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
mask_ht_params); &newmask->ht_node, mask_ht_params);
if (err) if (err)
goto errout_destroy; goto errout_destroy;
/* Wait until any potential concurrent users of mask are finished */
synchronize_rcu();
list_add_tail_rcu(&newmask->list, &head->masks); list_add_tail_rcu(&newmask->list, &head->masks);
return newmask; return newmask;
...@@ -1330,19 +1333,36 @@ static int fl_check_assign_mask(struct cls_fl_head *head, ...@@ -1330,19 +1333,36 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
int ret = 0; int ret = 0;
rcu_read_lock(); rcu_read_lock();
fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
/* Insert mask as temporary node to prevent concurrent creation of mask
* with same key. Any concurrent lookups with same key will return
* -EAGAIN because mask's refcnt is zero. It is safe to insert
* stack-allocated 'mask' to masks hash table because we call
* synchronize_rcu() before returning from this function (either in case
* of error or after replacing it with heap-allocated mask in
* fl_create_new_mask()).
*/
fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
&mask->ht_node,
mask_ht_params);
if (!fnew->mask) { if (!fnew->mask) {
rcu_read_unlock(); rcu_read_unlock();
if (fold) if (fold) {
return -EINVAL; ret = -EINVAL;
goto errout_cleanup;
}
newmask = fl_create_new_mask(head, mask); newmask = fl_create_new_mask(head, mask);
if (IS_ERR(newmask)) if (IS_ERR(newmask)) {
return PTR_ERR(newmask); ret = PTR_ERR(newmask);
goto errout_cleanup;
}
fnew->mask = newmask; fnew->mask = newmask;
return 0; return 0;
} else if (IS_ERR(fnew->mask)) {
ret = PTR_ERR(fnew->mask);
} else if (fold && fold->mask != fnew->mask) { } else if (fold && fold->mask != fnew->mask) {
ret = -EINVAL; ret = -EINVAL;
} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
...@@ -1351,6 +1371,13 @@ static int fl_check_assign_mask(struct cls_fl_head *head, ...@@ -1351,6 +1371,13 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
} }
rcu_read_unlock(); rcu_read_unlock();
return ret; return ret;
errout_cleanup:
rhashtable_remove_fast(&head->ht, &mask->ht_node,
mask_ht_params);
/* Wait until any potential concurrent users of mask are finished */
synchronize_rcu();
return ret;
} }
static int fl_set_parms(struct net *net, struct tcf_proto *tp, static int fl_set_parms(struct net *net, struct tcf_proto *tp,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment