Commit b4cd3f93 authored by Patrick McHardy, committed by David S. Miller

[PKT_SCHED]: Unlink inner qdiscs immediately in qdisc_destroy()

Before the RCU change, destruction of the qdisc and all inner
qdiscs happened immediately and under the rtnl semaphore. This
made sure nothing holding the rtnl semaphore could end up with
invalid memory. This is not true anymore: inner qdiscs found on
dev->qdisc_list can be suddenly destroyed by the RCU callback.
Unlinking the inner qdiscs from dev->qdisc_list immediately in
qdisc_destroy() makes sure nothing can find them until they get
destroyed.
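
For illustration, here is a minimal user-space sketch (not kernel code)
of the walk qdisc_destroy() now performs to find those inner qdiscs: a
child qdisc records the class it is grafted under in ->parent, and class
handles share their major number with the containing qdisc's handle, so
children are found by comparing majors. Only the TC_H_MAJ() test mirrors
the kernel macro; the struct and the fixed-point loop are simplified
stand-ins.

/*
 * Sketch only: collect a qdisc and everything grafted below it by
 * matching the major number of each qdisc's parent class against the
 * handles of already-collected classful qdiscs.
 */
#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)

struct fake_qdisc {             /* simplified stand-in for struct Qdisc */
        uint32_t handle;        /* this qdisc's own handle, "major:0" */
        uint32_t parent;        /* handle of the class it is grafted under */
        int      classful;      /* has cl_ops, i.e. may contain inner qdiscs */
        int      collected;
};

int main(void)
{
        /* 1: is the root; 2: sits under class 1:1; 3: sits under class 2:1 */
        struct fake_qdisc qdiscs[] = {
                { 0x00010000, 0,          1, 0 },
                { 0x00020000, 0x00010001, 1, 0 },
                { 0x00030000, 0x00020001, 0, 0 },
        };
        int n = 3, i, j, progress;

        qdiscs[0].collected = 1;        /* the qdisc being destroyed */

        /* sweep until no new inner qdisc is found */
        do {
                progress = 0;
                for (i = 0; i < n; i++) {
                        if (!qdiscs[i].collected || !qdiscs[i].classful)
                                continue;
                        for (j = 0; j < n; j++) {
                                if (!qdiscs[j].collected &&
                                    TC_H_MAJ(qdiscs[j].parent) ==
                                    TC_H_MAJ(qdiscs[i].handle)) {
                                        qdiscs[j].collected = 1;
                                        progress = 1;
                                }
                        }
                }
        } while (progress);

        for (i = 0; i < n; i++)
                printf("%x: %s\n", qdiscs[i].handle >> 16,
                       qdiscs[i].collected ? "unlinked" : "kept");
        return 0;
}

The patch itself does the same collection with a work list (cql):
classful qdiscs are moved to the tail of cql with list_move_tail(), so
the outer list_for_each_entry() picks them up and their children get
unlinked as well.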

This also makes semantics sane again: an inner qdisc should not
be user-visible once the containing qdisc has been destroyed. The
second part (locking in qdisc_lookup) is not really required, but
currently the only purpose of qdisc_tree_lock seems to be to protect
dev->qdisc_list, which is also protected by the rtnl. The rtnl is
especially relied on for making sure nobody frees a qdisc while it
is used in user context, so qdisc_tree_lock looks unnecessary. I'm
currently reviewing all qdisc locking; if this turns out to be right,
I will remove qdisc_tree_lock entirely in a follow-up patch, but for
now I have left it in for consistency.
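
As an aside on the qdisc_lookup() locking mentioned above, here is a
minimal user-space analogue of the pattern the patch adds there: take
the read lock, walk the list, and drop the lock on every exit path,
including the early return. The pthread rwlock and the types below are
stand-ins, not kernel API; in the kernel it is the rtnl that actually
keeps the returned qdisc alive after the lock is dropped.

/*
 * User-space analogue of the qdisc_lookup() change: a reader-locked
 * list walk that releases the lock on the early-return path as well.
 * Illustration only; names and types here are made up.
 */
#include <pthread.h>
#include <stddef.h>

struct entry {
        unsigned int handle;
        struct entry *next;
};

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct entry *entry_list;        /* readers hold tree_lock */

static struct entry *lookup(unsigned int handle)
{
        struct entry *e;

        pthread_rwlock_rdlock(&tree_lock);
        for (e = entry_list; e != NULL; e = e->next) {
                if (e->handle == handle) {
                        /* drop the lock before returning the match */
                        pthread_rwlock_unlock(&tree_lock);
                        return e;
                }
        }
        pthread_rwlock_unlock(&tree_lock);
        return NULL;
}

int main(void)
{
        /* list is empty here, so this simply exercises the NULL path */
        return lookup(0x10000) ? 0 : 1;
}
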
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3b4b3bfb
@@ -196,10 +196,14 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	struct Qdisc *q;
 
+	read_lock_bh(&qdisc_tree_lock);
 	list_for_each_entry(q, &dev->qdisc_list, list) {
-		if (q->handle == handle)
+		if (q->handle == handle) {
+			read_unlock_bh(&qdisc_tree_lock);
 			return q;
+		}
 	}
+	read_unlock_bh(&qdisc_tree_lock);
 	return NULL;
 }
@@ -483,10 +483,32 @@ static void __qdisc_destroy(struct rcu_head *head)
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
+	struct list_head cql = LIST_HEAD_INIT(cql);
+	struct Qdisc *cq, *q, *n;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
-	list_del(&qdisc->list);
+
+	if (!list_empty(&qdisc->list)) {
+		if (qdisc->ops->cl_ops == NULL)
+			list_del(&qdisc->list);
+		else
+			list_move(&qdisc->list, &cql);
+	}
+
+	/* unlink inner qdiscs from dev->qdisc_list immediately */
+	list_for_each_entry(cq, &cql, list)
+		list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
+			if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
+				if (q->ops->cl_ops == NULL)
+					list_del_init(&q->list);
+				else
+					list_move_tail(&q->list, &cql);
+			}
+	list_for_each_entry_safe(cq, n, &cql, list)
+		list_del_init(&cq->list);
+
 	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }