Commit 1d965c4d authored by David S. Miller

Merge branch 'Refactor-flower-classifier-to-remove-dependency-on-rtnl-lock'

Vlad Buslov says:

====================
Refactor flower classifier to remove dependency on rtnl lock

Currently, all netlink protocol handlers for updating rules, actions and
qdiscs are protected by a single global rtnl lock, which removes any
possibility of parallelism. This patch set is the third step in removing
the rtnl lock dependency from the TC rules update path.

Recently, a new rtnl registration flag, RTNL_FLAG_DOIT_UNLOCKED, was
added. TC rule update handlers (RTM_NEWTFILTER, RTM_DELTFILTER, etc.) are
already registered with this flag and only take the rtnl lock when the
qdisc or classifier requires it. Classifiers can indicate that their ops
callbacks don't require the caller to hold the rtnl lock by setting the
TCF_PROTO_OPS_DOIT_UNLOCKED flag. The goal of this change is to refactor
the flower classifier to support unlocked execution and register it with
that flag.
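
As a rough illustration (simplified; the helper name below is mine and
not actual cls API code), the decision boils down to checking the ops
flags before calling into the classifier:

    /* Sketch: does calling into this classifier's ops require rtnl? */
    static bool tcf_proto_needs_rtnl(const struct tcf_proto_ops *ops)
    {
            return !(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
    }

The result of such a check is one of the inputs that determine the
'rtnl_held' argument passed down to the classifier callbacks.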

This patch set implements the following changes to make the flower
classifier concurrency-safe:

- Implement reference counting for individual filters. Change fl_get to
  take a reference to the filter. Implement the tp->ops->put callback that
  was introduced in the cls API patch set to release a reference to a
  flower filter (see the get/put sketch after this list).

- Use tp->lock spinlock to protect internal classifier data structures
  from concurrent modification.

- Handle concurrent tcf proto deletion by returning -EAGAIN, which will
  cause the cls API to retry and create a new proto instance or return an
  error to the user (depending on the message type).

- Handle concurrent insertion of a filter with the same priority and
  handle by returning -EAGAIN, which will cause the cls API to look up the
  filter again and process it according to the netlink message flags.

- Extend flower mask with reference counting and protect masks list with
  masks_lock spinlock.

- Prevent concurrent mask insertion by inserting a temporary entry into
  the masks hash table. This is necessary because mask initialization is a
  sleeping operation and cannot be done while holding tp->lock.
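
To make the filter reference counting concrete, here is a simplified
sketch (not verbatim cls API code) of how a caller is expected to pair
the get/put callbacks around work on a single filter:

    void *fh;

    fh = tp->ops->get(tp, handle);  /* flower: takes f->refcnt */
    if (fh) {
            /* ... operate on the filter, e.g. dump it ... */
            tp->ops->put(tp, fh);   /* flower: __fl_put(), frees on last reference */
    }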

Both chain-level and classifier-level conflicts are resolved by returning
-EAGAIN to the cls API, which results in a restart of the whole
operation. This retry mechanism is a consequence of the fine-grained
locking approach used in this and previous changes in the series and is
necessary to allow concurrent updates on the same chain instance. An
alternative approach would be to lock the whole chain while updating
filters on any of its child tp's and while adding and removing classifier
instances from the chain. However, since the most CPU-intensive parts of
the filter update code are in the classifier code and its dependencies
(extensions and hw offloads), such an approach would negate most of the
gains introduced by this change and the previous changes in the series
when updating the same chain instance.
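
Schematically (names are illustrative, not the exact cls API flow), the
rule update handlers react to such a conflict by restarting the request:

    replay:
            /* look up or create the chain and tp instance (omitted) */
            err = tp->ops->change(net, skb, tp, base, handle, tca, &fh,
                                  ovr, rtnl_held, extack);
            if (err == -EAGAIN)
                    goto replay;    /* lost a race with a concurrent update; retry */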

The tcf hw offloads API is not changed by this patch set and still
requires the caller to hold the rtnl lock. The refactored flower
classifier tracks the rtnl lock state by means of the 'rtnl_held' flag
provided by the cls API and obtains the lock before calling hw offloads.
A follow-up patch set will lift this restriction and refactor the cls hw
offloads API to support unlocked execution.

With these changes in place, the flower classifier is safely registered
with the TCF_PROTO_OPS_DOIT_UNLOCKED flag in the last patch.

Changes from V2 to V3:
- Rebase on latest net-next

Changes from V1 to V2:
- Extend cover letter with explanation about retry mechanism.
- Rebase on current net-next.
- Patch 1:
  - Use rcu_dereference_raw() for tp->root dereference.
  - Update comment in fl_head_dereference().
- Patch 2:
  - Remove redundant check in fl_change error handling code.
  - Add empty line between error check and new handle assignment.
- Patch 3:
  - Refactor loop in fl_get_next_filter() to improve readability.
- Patch 4:
  - Refactor __fl_delete() to improve readability.
- Patch 6:
  - Fix comment in fl_check_assign_mask().
- Patch 9:
  - Extend commit message.
  - Fix error code in comment.
- Patch 11:
  - Fix fl_hw_replace_filter() to always release rtnl lock in error
    handlers.
- Patch 12:
  - Don't take rtnl lock before calling __fl_destroy_filter() in
    workqueue context.
  - Extend commit message with explanation why flower still takes rtnl
    lock before calling hardware offloads API.

Github: <https://github.com/vbuslov/linux/tree/unlocked-flower-cong3>
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 31f1a0e3 92149190
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/if_ether.h>
#include <linux/in6.h>
@@ -75,6 +76,7 @@ struct fl_flow_mask {
struct list_head filters;
struct rcu_work rwork;
struct list_head list;
refcount_t refcnt;
};
struct fl_flow_tmplt {
@@ -86,6 +88,7 @@ struct fl_flow_tmplt {
struct cls_fl_head {
struct rhashtable ht;
spinlock_t masks_lock; /* Protect masks list */
struct list_head masks;
struct rcu_work rwork;
struct idr handle_idr;
@@ -104,6 +107,12 @@ struct cls_fl_filter {
u32 in_hw_count;
struct rcu_work rwork;
struct net_device *hw_dev;
/* Flower classifier is unlocked, which means that its reference counter
* can be changed concurrently without any kind of external
* synchronization. Use atomic reference counter to be concurrency-safe.
*/
refcount_t refcnt;
bool deleted;
};
static const struct rhashtable_params mask_ht_params = {
@@ -304,6 +313,7 @@ static int fl_init(struct tcf_proto *tp)
if (!head)
return -ENOBUFS;
spin_lock_init(&head->masks_lock);
INIT_LIST_HEAD_RCU(&head->masks);
rcu_assign_pointer(tp->root, head);
idr_init(&head->handle_idr);
@@ -313,6 +323,7 @@ static int fl_init(struct tcf_proto *tp)
static void fl_mask_free(struct fl_flow_mask *mask)
{
WARN_ON(!list_empty(&mask->filters));
rhashtable_destroy(&mask->ht);
kfree(mask);
}
@@ -328,11 +339,15 @@ static void fl_mask_free_work(struct work_struct *work)
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
bool async)
{
if (!list_empty(&mask->filters))
if (!refcount_dec_and_test(&mask->refcnt))
return false;
rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
spin_lock(&head->masks_lock);
list_del_rcu(&mask->list);
spin_unlock(&head->masks_lock);
if (async)
tcf_queue_work(&mask->rwork, fl_mask_free_work);
else
@@ -353,37 +368,48 @@ static void fl_destroy_filter_work(struct work_struct *work)
struct cls_fl_filter *f = container_of(to_rcu_work(work),
struct cls_fl_filter, rwork);
rtnl_lock();
__fl_destroy_filter(f);
rtnl_unlock();
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
struct netlink_ext_ack *extack)
bool rtnl_held, struct netlink_ext_ack *extack)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
if (!rtnl_held)
rtnl_lock();
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f;
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
spin_lock(&tp->lock);
tcf_block_offload_dec(block, &f->flags);
spin_unlock(&tp->lock);
if (!rtnl_held)
rtnl_unlock();
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
struct cls_fl_filter *f,
struct cls_fl_filter *f, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
bool skip_sw = tc_skip_sw(f->flags);
int err;
int err = 0;
if (!rtnl_held)
rtnl_lock();
cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
if (!cls_flower.rule)
return -ENOMEM;
if (!cls_flower.rule) {
err = -ENOMEM;
goto errout;
}
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
cls_flower.command = TC_CLSFLOWER_REPLACE;
@@ -396,35 +422,48 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (skip_sw) {
if (skip_sw)
NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
return err;
}
return 0;
else
err = 0;
goto errout;
}
err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
kfree(cls_flower.rule);
if (err < 0) {
fl_hw_destroy_filter(tp, f, NULL);
return err;
fl_hw_destroy_filter(tp, f, true, NULL);
goto errout;
} else if (err > 0) {
f->in_hw_count = err;
err = 0;
spin_lock(&tp->lock);
tcf_block_offload_inc(block, &f->flags);
spin_unlock(&tp->lock);
}
if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
return -EINVAL;
if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
err = -EINVAL;
goto errout;
}
return 0;
errout:
if (!rtnl_held)
rtnl_unlock();
return err;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
bool rtnl_held)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
if (!rtnl_held)
rtnl_lock();
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
@@ -435,27 +474,94 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
cls_flower.stats.pkts,
cls_flower.stats.lastused);
if (!rtnl_held)
rtnl_unlock();
}
static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
/* Flower classifier only changes root pointer during init and destroy.
* Users must obtain reference to tcf_proto instance before calling its
* API, so tp->root pointer is protected from concurrent call to
* fl_destroy() by reference counting.
*/
return rcu_dereference_raw(tp->root);
}
static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
static void __fl_put(struct cls_fl_filter *f)
{
if (!refcount_dec_and_test(&f->refcnt))
return;
WARN_ON(!f->deleted);
if (tcf_exts_get_net(&f->exts))
tcf_queue_work(&f->rwork, fl_destroy_filter_work);
else
__fl_destroy_filter(f);
}
static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
struct cls_fl_filter *f;
rcu_read_lock();
f = idr_find(&head->handle_idr, handle);
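/* A filter whose refcnt already dropped to zero is being destroyed
 * concurrently; treat it as not found.
 */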
if (f && !refcount_inc_not_zero(&f->refcnt))
f = NULL;
rcu_read_unlock();
return f;
}
static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
unsigned long *handle)
{
struct cls_fl_head *head = fl_head_dereference(tp);
struct cls_fl_filter *f;
rcu_read_lock();
while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
/* don't return filters that are being deleted */
if (refcount_inc_not_zero(&f->refcnt))
break;
++(*handle);
}
rcu_read_unlock();
return f;
}
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
bool *last, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_head *head = fl_head_dereference(tp);
bool async = tcf_exts_get_net(&f->exts);
bool last;
*last = false;
spin_lock(&tp->lock);
if (f->deleted) {
spin_unlock(&tp->lock);
return -ENOENT;
}
f->deleted = true;
rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
f->mask->filter_ht_params);
idr_remove(&head->handle_idr, f->handle);
list_del_rcu(&f->list);
last = fl_mask_put(head, f->mask, async);
spin_unlock(&tp->lock);
*last = fl_mask_put(head, f->mask, async);
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f, extack);
fl_hw_destroy_filter(tp, f, rtnl_held, extack);
tcf_unbind_filter(tp, &f->res);
if (async)
tcf_queue_work(&f->rwork, fl_destroy_filter_work);
else
__fl_destroy_filter(f);
__fl_put(f);
return last;
return 0;
}
static void fl_destroy_sleepable(struct work_struct *work)
@@ -472,13 +578,15 @@ static void fl_destroy_sleepable(struct work_struct *work)
static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_head *head = fl_head_dereference(tp);
struct fl_flow_mask *mask, *next_mask;
struct cls_fl_filter *f, *next;
bool last;
list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
list_for_each_entry_safe(f, next, &mask->filters, list) {
if (__fl_delete(tp, f, extack))
__fl_delete(tp, f, &last, rtnl_held, extack);
if (last)
break;
}
}
@@ -488,11 +596,18 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
static void fl_put(struct tcf_proto *tp, void *arg)
{
struct cls_fl_filter *f = arg;
__fl_put(f);
}
static void *fl_get(struct tcf_proto *tp, u32 handle)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_head *head = fl_head_dereference(tp);
return idr_find(&head->handle_idr, handle);
return __fl_get(head, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
@@ -1227,12 +1342,18 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
INIT_LIST_HEAD_RCU(&newmask->filters);
err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
mask_ht_params);
refcount_set(&newmask->refcnt, 1);
err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
&newmask->ht_node, mask_ht_params);
if (err)
goto errout_destroy;
/* Wait until any potential concurrent users of mask are finished */
synchronize_rcu();
spin_lock(&head->masks_lock);
list_add_tail_rcu(&newmask->list, &head->masks);
spin_unlock(&head->masks_lock);
return newmask;
@@ -1250,41 +1371,77 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
struct fl_flow_mask *mask)
{
struct fl_flow_mask *newmask;
int ret = 0;
rcu_read_lock();
fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
/* Insert mask as temporary node to prevent concurrent creation of mask
* with same key. Any concurrent lookups with same key will return
* -EAGAIN because mask's refcnt is zero. It is safe to insert
* stack-allocated 'mask' to masks hash table because we call
* synchronize_rcu() before returning from this function (either in case
* of error or after replacing it with heap-allocated mask in
* fl_create_new_mask()).
*/
fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
&mask->ht_node,
mask_ht_params);
if (!fnew->mask) {
if (fold)
return -EINVAL;
rcu_read_unlock();
if (fold) {
ret = -EINVAL;
goto errout_cleanup;
}
newmask = fl_create_new_mask(head, mask);
if (IS_ERR(newmask))
return PTR_ERR(newmask);
if (IS_ERR(newmask)) {
ret = PTR_ERR(newmask);
goto errout_cleanup;
}
fnew->mask = newmask;
return 0;
} else if (IS_ERR(fnew->mask)) {
ret = PTR_ERR(fnew->mask);
} else if (fold && fold->mask != fnew->mask) {
return -EINVAL;
ret = -EINVAL;
} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
/* Mask was deleted concurrently, try again */
ret = -EAGAIN;
}
rcu_read_unlock();
return ret;
return 0;
errout_cleanup:
rhashtable_remove_fast(&head->ht, &mask->ht_node,
mask_ht_params);
/* Wait until any potential concurrent users of mask are finished */
synchronize_rcu();
return ret;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_fl_filter *f, struct fl_flow_mask *mask,
unsigned long base, struct nlattr **tb,
struct nlattr *est, bool ovr,
struct fl_flow_tmplt *tmplt,
struct fl_flow_tmplt *tmplt, bool rtnl_held,
struct netlink_ext_ack *extack)
{
int err;
err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
extack);
if (err < 0)
return err;
if (tb[TCA_FLOWER_CLASSID]) {
f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
if (!rtnl_held)
rtnl_lock();
tcf_bind_filter(tp, &f->res, base);
if (!rtnl_held)
rtnl_unlock();
}
err = fl_set_key(net, tb, &f->key, &mask->key, extack);
@@ -1308,19 +1465,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
void **arg, bool ovr, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_head *head = fl_head_dereference(tp);
struct cls_fl_filter *fold = *arg;
struct cls_fl_filter *fnew;
struct fl_flow_mask *mask;
struct nlattr **tb;
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
if (!tca[TCA_OPTIONS]) {
err = -EINVAL;
goto errout_fold;
}
mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
if (!mask)
return -ENOBUFS;
if (!mask) {
err = -ENOBUFS;
goto errout_fold;
}
tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
if (!tb) {
@@ -1343,6 +1504,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
err = -ENOBUFS;
goto errout_tb;
}
refcount_set(&fnew->refcnt, 1);
err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
if (err < 0)
@@ -1358,7 +1520,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tp->chain->tmplt_priv, extack);
tp->chain->tmplt_priv, rtnl_held, extack);
if (err)
goto errout;
@@ -1366,73 +1528,111 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (err)
goto errout;
if (!handle) {
handle = 1;
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
INT_MAX, GFP_KERNEL);
} else if (!fold) {
/* user specifies a handle and it doesn't exist */
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
handle, GFP_KERNEL);
}
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
if (err)
goto errout_mask;
fnew->handle = handle;
}
if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
err = -EEXIST;
goto errout_idr;
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
spin_lock(&tp->lock);
/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
* proto again or create new one, if necessary.
*/
if (tp->deleting) {
err = -EAGAIN;
goto errout_hw;
}
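/* One reference is kept by the classifier data structures (fnew is
 * inserted below); take a second one that is returned to the caller via
 * *arg and released later through fl_put().
 */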
refcount_inc(&fnew->refcnt);
if (fold) {
/* Fold filter was deleted concurrently. Retry lookup. */
if (fold->deleted) {
err = -EAGAIN;
goto errout_hw;
}
fnew->handle = handle;
err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
if (err)
goto errout_idr;
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, extack);
if (err)
goto errout_mask_ht;
}
goto errout_hw;
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
if (fold) {
rhashtable_remove_fast(&fold->mask->ht,
&fold->ht_node,
fold->mask->filter_ht_params);
if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold, NULL);
}
*arg = fnew;
if (fold) {
idr_replace(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->list, &fnew->list);
fold->deleted = true;
spin_unlock(&tp->lock);
fl_mask_put(head, fold->mask, true);
if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
/* Caller holds reference to fold, so refcnt is always > 0
* after this.
*/
refcount_dec(&fold->refcnt);
__fl_put(fold);
} else {
if (__fl_lookup(fnew->mask, &fnew->mkey)) {
err = -EEXIST;
goto errout_hw;
}
if (handle) {
/* user specifies a handle and it doesn't exist */
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
handle, GFP_ATOMIC);
/* Filter with specified handle was concurrently
* inserted after initial check in cls_api. This is not
* necessarily an error if NLM_F_EXCL is not set in
* message flags. Returning EAGAIN will cause cls_api to
* try to update concurrently inserted rule.
*/
if (err == -ENOSPC)
err = -EAGAIN;
} else {
handle = 1;
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
INT_MAX, GFP_ATOMIC);
}
if (err)
goto errout_hw;
fnew->handle = handle;
err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
if (err)
goto errout_idr;
list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
spin_unlock(&tp->lock);
}
*arg = fnew;
kfree(tb);
kfree(mask);
return 0;
errout_mask_ht:
rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
errout_idr:
if (!fold)
idr_remove(&head->handle_idr, fnew->handle);
errout_hw:
spin_unlock(&tp->lock);
if (!tc_skip_hw(fnew->flags))
fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
errout_mask:
fl_mask_put(head, fnew->mask, false);
fl_mask_put(head, fnew->mask, true);
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
@@ -1440,37 +1640,42 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
kfree(tb);
errout_mask_alloc:
kfree(mask);
errout_fold:
if (fold)
__fl_put(fold);
return err;
}
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
bool rtnl_held, struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_head *head = fl_head_dereference(tp);
struct cls_fl_filter *f = arg;
bool last_on_mask;
int err = 0;
rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
f->mask->filter_ht_params);
__fl_delete(tp, f, extack);
err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
*last = list_empty(&head->masks);
return 0;
__fl_put(f);
return err;
}
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
bool rtnl_held)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f;
arg->count = arg->skip;
while ((f = idr_get_next_ul(&head->handle_idr,
&arg->cookie)) != NULL) {
while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
if (arg->fn(tp, f, arg) < 0) {
__fl_put(f);
arg->stop = 1;
break;
}
arg->cookie = f->handle + 1;
__fl_put(f);
arg->cookie++;
arg->count++;
}
}
@@ -1478,7 +1683,7 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
void *cb_priv, struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_head *head = fl_head_dereference(tp);
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
struct fl_flow_mask *mask;
@@ -1526,8 +1731,10 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
continue;
}
spin_lock(&tp->lock);
tc_cls_offload_cnt_update(block, &f->in_hw_count,
&f->flags, add);
spin_unlock(&tp->lock);
}
}
@@ -2061,6 +2268,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct cls_fl_filter *f = fh;
struct nlattr *nest;
struct fl_flow_key *key, *mask;
bool skip_hw;
if (!f)
return skb->len;
@@ -2071,21 +2279,26 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (!nest)
goto nla_put_failure;
spin_lock(&tp->lock);
if (f->res.classid &&
nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
goto nla_put_failure;
goto nla_put_failure_locked;
key = &f->key;
mask = &f->mask->key;
skip_hw = tc_skip_hw(f->flags);
if (fl_dump_key(skb, net, key, mask))
goto nla_put_failure;
if (!tc_skip_hw(f->flags))
fl_hw_update_stats(tp, f);
goto nla_put_failure_locked;
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
goto nla_put_failure;
goto nla_put_failure_locked;
spin_unlock(&tp->lock);
if (!skip_hw)
fl_hw_update_stats(tp, f, rtnl_held);
if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
goto nla_put_failure;
@@ -2100,6 +2313,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
return skb->len;
nla_put_failure_locked:
spin_unlock(&tp->lock);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
@@ -2144,6 +2359,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.init = fl_init,
.destroy = fl_destroy,
.get = fl_get,
.put = fl_put,
.change = fl_change,
.delete = fl_delete,
.walk = fl_walk,
@@ -2154,6 +2370,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.tmplt_destroy = fl_tmplt_destroy,
.tmplt_dump = fl_tmplt_dump,
.owner = THIS_MODULE,
.flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
};
static int __init cls_fl_init(void)