Commit efbf7897 authored by Cong Wang, committed by David S. Miller

net_sched: get rid of rcu_barrier() in tcf_block_put_ext()

Both Eric and Paolo noticed the rcu_barrier() we use in
tcf_block_put_ext() could be a performance bottleneck when
we have a lot of tc classes.

Paolo provided the following to demonstrate the issue:

tc qdisc add dev lo root htb
for I in `seq 1 1000`; do
        tc class add dev lo parent 1: classid 1:$I htb rate 100kbit
        tc qdisc add dev lo parent 1:$I handle $((I + 1)): htb
        for J in `seq 1 10`; do
                tc filter add dev lo parent $((I + 1)): u32 match ip src 1.1.1.$J
        done
done
time tc qdisc del dev lo root

real    0m54.764s
user    0m0.023s
sys     0m0.000s

The rcu_barrier() there is to ensure we free the block after all chains
are gone, that is, to queue tcf_block_put_final() at the tail of the
workqueue. We can achieve this ordering requirement by refcounting the
tcf block instead, that is, the tcf block is freed only when the last
chain in the block is gone. This also simplifies the code.
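
For illustration only, here is a minimal userspace sketch of that
ordering idea. It is not the kernel code: block, chain, chain_destroy()
and block_put() are simplified stand-ins for tcf_block, tcf_chain,
tcf_chain_destroy() and tcf_block_put_ext(), and the intrusive list is
replaced by a plain singly linked list.

/* Whichever chain teardown empties the block's list also frees the
 * block, so no barrier is needed to order "free block" after
 * "free last chain".
 */
#include <stdio.h>
#include <stdlib.h>

struct block;

struct chain {
	struct chain *next;
	struct block *block;
	int id;
};

struct block {
	struct chain *chains;		/* stand-in for chain_list */
};

static void chain_destroy(struct chain *chain)
{
	struct block *block = chain->block;
	struct chain **pp = &block->chains;

	while (*pp != chain)		/* unlink chain from its block */
		pp = &(*pp)->next;
	*pp = chain->next;
	printf("freed chain %d\n", chain->id);
	free(chain);

	if (!block->chains) {		/* last chain out frees the block */
		printf("freed block\n");
		free(block);
	}
}

static void block_put(struct block *block)
{
	struct chain *chain = block->chains, *tmp;

	while (chain) {			/* save next before destroying */
		tmp = chain->next;
		chain_destroy(chain);
		chain = tmp;
	}
}

int main(void)
{
	struct block *block = calloc(1, sizeof(*block));

	for (int i = 0; i < 3; i++) {
		struct chain *c = calloc(1, sizeof(*c));

		c->id = i;
		c->block = block;
		c->next = block->chains;
		block->chains = c;
	}
	block_put(block);	/* block freed together with its last chain */
	return 0;
}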

Paolo reported that after this patch we get:

real    0m0.017s
user    0m0.000s
sys     0m0.017s
Tested-by: Paolo Abeni <pabeni@redhat.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jiri Pirko <jiri@mellanox.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3e394ef2
@@ -278,7 +278,6 @@ struct tcf_block {
 	struct net *net;
 	struct Qdisc *q;
 	struct list_head cb_list;
-	struct work_struct work;
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
@@ -218,8 +218,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 
 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
+	struct tcf_block *block = chain->block;
+
 	list_del(&chain->list);
 	kfree(chain);
+	if (list_empty(&block->chain_list))
+		kfree(block);
 }
 
 static void tcf_chain_hold(struct tcf_chain *chain)
@@ -330,27 +334,13 @@ int tcf_block_get(struct tcf_block **p_block,
 }
 EXPORT_SYMBOL(tcf_block_get);
 
-static void tcf_block_put_final(struct work_struct *work)
-{
-	struct tcf_block *block = container_of(work, struct tcf_block, work);
-	struct tcf_chain *chain, *tmp;
-
-	rtnl_lock();
-	/* At this point, all the chains should have refcnt == 1. */
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-		tcf_chain_put(chain);
-	rtnl_unlock();
-	kfree(block);
-}
-
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
  * actions should be all removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei)
 {
-	struct tcf_chain *chain;
+	struct tcf_chain *chain, *tmp;
 
 	/* Hold a refcnt for all chains, except 0, so that they don't disappear
 	 * while we are iterating.
@@ -364,13 +354,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 
 	tcf_block_offload_unbind(block, q, ei);
 
-	INIT_WORK(&block->work, tcf_block_put_final);
-	/* Wait for existing RCU callbacks to cool down, make sure their works
-	 * have been queued before this. We can not flush pending works here
-	 * because we are holding the RTNL lock.
-	 */
-	rcu_barrier();
-	tcf_queue_work(&block->work);
+	/* At this point, all the chains should have refcnt >= 1. Block will be
+	 * freed after all chains are gone.
+	 */
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+		tcf_chain_put(chain);
 }
 EXPORT_SYMBOL(tcf_block_put_ext);