Commit b362487a authored by Cong Wang, committed by David S. Miller

sch_htb: redefine htb qdisc overlimits

In commit 3c75f6ee ("net_sched: sch_htb: add per class overlimits counter")
we added an overlimits counter for each HTB class which could
properly reflect how many times we use up all the bandwidth
on each class. However, the overlimits counter in HTB qdisc
does not, it is way bigger than the sum of each HTB class.
In fact, this qdisc overlimits counter increases when we have
no skb to dequeue, which happens more often than we run out of
bandwidth.

It makes more sense to make this qdisc overlimits counter just
be a sum of each HTB class, in case people still get confused.

I have verified this patch with one single HTB class, where HTB
qdisc counters now always match HTB class counters as expected.

Eric suggested we could fold this field into 'direct_pkts' as
we only use its 32bit on 64bit CPU, this saves one cache line.

Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f452825d
...@@ -165,7 +165,8 @@ struct htb_sched { ...@@ -165,7 +165,8 @@ struct htb_sched {
/* non shaped skbs; let them go directly thru */ /* non shaped skbs; let them go directly thru */
struct qdisc_skb_head direct_queue; struct qdisc_skb_head direct_queue;
long direct_pkts; u32 direct_pkts;
u32 overlimits;
struct qdisc_watchdog watchdog; struct qdisc_watchdog watchdog;
...@@ -533,8 +534,10 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) ...@@ -533,8 +534,10 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
if (new_mode == cl->cmode) if (new_mode == cl->cmode)
return; return;
if (new_mode == HTB_CANT_SEND) if (new_mode == HTB_CANT_SEND) {
cl->overlimits++; cl->overlimits++;
q->overlimits++;
}
if (cl->prio_activity) { /* not necessary: speed optimization */ if (cl->prio_activity) { /* not necessary: speed optimization */
if (cl->cmode != HTB_CANT_SEND) if (cl->cmode != HTB_CANT_SEND)
...@@ -937,7 +940,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) ...@@ -937,7 +940,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
goto ok; goto ok;
} }
} }
qdisc_qstats_overlimit(sch);
if (likely(next_event > q->now)) if (likely(next_event > q->now))
qdisc_watchdog_schedule_ns(&q->watchdog, next_event); qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
else else
...@@ -1048,6 +1050,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -1048,6 +1050,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *nest; struct nlattr *nest;
struct tc_htb_glob gopt; struct tc_htb_glob gopt;
sch->qstats.overlimits = q->overlimits;
/* Its safe to not acquire qdisc lock. As we hold RTNL, /* Its safe to not acquire qdisc lock. As we hold RTNL,
* no change can happen on the qdisc parameters. * no change can happen on the qdisc parameters.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment