Commit 9190b3b3 authored by Eric Dumazet, committed by David S. Miller

net_sched: accurate bytes/packets stats/rates

In commit 44b82883 (net_sched: pfifo_head_drop problem), we fixed
a problem with pfifo_head drops that incorrectly decreased
sch->bstats.bytes and sch->bstats.packets

Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
previously enqueued packet, but bstats cannot be changed at that point,
so bstats/rates are not accurate (they are overestimated).

This patch changes the qdisc_bstats updates to be done at dequeue() time
instead of enqueue() time. bstats counters no longer account for dropped
frames, and rates are more correct, since enqueue() bursts don't have an
effect on the dequeue() rate.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b3053251
...@@ -445,7 +445,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, ...@@ -445,7 +445,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{ {
__skb_queue_tail(list, skb); __skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb); sch->qstats.backlog += qdisc_pkt_len(skb);
qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -460,8 +459,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, ...@@ -460,8 +459,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
{ {
struct sk_buff *skb = __skb_dequeue(list); struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) if (likely(skb != NULL)) {
sch->qstats.backlog -= qdisc_pkt_len(skb); sch->qstats.backlog -= qdisc_pkt_len(skb);
qdisc_bstats_update(sch, skb);
}
return skb; return skb;
} }
...@@ -474,10 +475,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) ...@@ -474,10 +475,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct sk_buff_head *list) struct sk_buff_head *list)
{ {
struct sk_buff *skb = __qdisc_dequeue_head(sch, list); struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) { if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb); unsigned int len = qdisc_pkt_len(skb);
sch->qstats.backlog -= len;
kfree_skb(skb); kfree_skb(skb);
return len; return len;
} }
......
...@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, cl->q); ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) { if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++; sch->q.qlen++;
qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl); cbq_mark_toplevel(q, cl);
if (!cl->next_alive) if (!cl->next_alive)
cbq_activate_class(cl); cbq_activate_class(cl);
...@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) ...@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
ret = qdisc_enqueue(skb, cl->q); ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) { if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++; sch->q.qlen++;
qdisc_bstats_update(sch, skb);
if (!cl->next_alive) if (!cl->next_alive)
cbq_activate_class(cl); cbq_activate_class(cl);
return 0; return 0;
...@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch) ...@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch); skb = cbq_dequeue_1(sch);
if (skb) { if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED; sch->flags &= ~TCQ_F_THROTTLED;
return skb; return skb;
......
...@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
bstats_update(&cl->bstats, skb); bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
sch->q.qlen++; sch->q.qlen++;
return err; return err;
...@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) ...@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc); skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0) if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist); list_del(&cl->alist);
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
} }
......
...@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err; return err;
} }
qdisc_bstats_update(sch, skb);
sch->q.qlen++; sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
...@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) ...@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
if (skb == NULL) if (skb == NULL)
return NULL; return NULL;
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
index = skb->tc_index & (p->indices - 1); index = skb->tc_index & (p->indices - 1);
......
...@@ -46,17 +46,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -46,17 +46,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{ {
struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch); struct fifo_sched_data *q = qdisc_priv(sch);
if (likely(skb_queue_len(&sch->q) < q->limit)) if (likely(skb_queue_len(&sch->q) < q->limit))
return qdisc_enqueue_tail(skb, sch); return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */ /* queue full, remove one skb to fulfill the limit */
skb_head = qdisc_dequeue_head(sch); __qdisc_queue_drop_head(sch, &sch->q);
sch->qstats.drops++; sch->qstats.drops++;
kfree_skb(skb_head);
qdisc_enqueue_tail(skb, sch); qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN; return NET_XMIT_CN;
......
...@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
set_active(cl, qdisc_pkt_len(skb)); set_active(cl, qdisc_pkt_len(skb));
bstats_update(&cl->bstats, skb); bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
sch->q.qlen++; sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
...@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch) ...@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
} }
sch->flags &= ~TCQ_F_THROTTLED; sch->flags &= ~TCQ_F_THROTTLED;
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
......
...@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
sch->q.qlen++; sch->q.qlen++;
qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -842,7 +841,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, ...@@ -842,7 +841,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
static struct sk_buff *htb_dequeue(struct Qdisc *sch) static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{ {
struct sk_buff *skb = NULL; struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch); struct htb_sched *q = qdisc_priv(sch);
int level; int level;
psched_time_t next_event; psched_time_t next_event;
...@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) ...@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
/* try to dequeue direct packets as high prio (!) to minimize cpu work */ /* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue); skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) { if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
sch->flags &= ~TCQ_F_THROTTLED; sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
...@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) ...@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
int prio = ffz(m); int prio = ffz(m);
m |= 1 << prio; m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level); skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL)) { if (likely(skb != NULL))
sch->q.qlen--; goto ok;
sch->flags &= ~TCQ_F_THROTTLED;
goto fin;
}
} }
} }
sch->qstats.overlimits++; sch->qstats.overlimits++;
......
...@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc); ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) { if (ret == NET_XMIT_SUCCESS) {
qdisc_bstats_update(sch, skb);
sch->q.qlen++; sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch) ...@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
qdisc = q->queues[q->curband]; qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc); skb = qdisc->dequeue(qdisc);
if (skb) { if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
} }
......
...@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(ret == NET_XMIT_SUCCESS)) { if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++; sch->q.qlen++;
qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) { } else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++; sch->qstats.drops++;
} }
...@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) ...@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp.tv64 = 0; skb->tstamp.tv64 = 0;
#endif #endif
pr_debug("netem_dequeue: return skb=%p\n", skb); pr_debug("netem_dequeue: return skb=%p\n", skb);
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
} }
...@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) ...@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
__skb_queue_after(list, skb, nskb); __skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb); sch->qstats.backlog += qdisc_pkt_len(nskb);
qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
......
...@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc); ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) { if (ret == NET_XMIT_SUCCESS) {
qdisc_bstats_update(sch, skb);
sch->q.qlen++; sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch) ...@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
struct Qdisc *qdisc = q->queues[prio]; struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc); struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) { if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
} }
......
...@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, child); ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) { if (likely(ret == NET_XMIT_SUCCESS)) {
qdisc_bstats_update(sch, skb);
sch->q.qlen++; sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) { } else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++; q->stats.pdrop++;
...@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch) ...@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
struct Qdisc *child = q->qdisc; struct Qdisc *child = q->qdisc;
skb = child->dequeue(child); skb = child->dequeue(child);
if (skb) if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
else if (!red_is_idling(&q->parms)) } else {
if (!red_is_idling(&q->parms))
red_start_of_idle_period(&q->parms); red_start_of_idle_period(&q->parms);
}
return skb; return skb;
} }
......
...@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tail = slot; q->tail = slot;
slot->allot = q->scaled_quantum; slot->allot = q->scaled_quantum;
} }
if (++sch->q.qlen <= q->limit) { if (++sch->q.qlen <= q->limit)
qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
}
sfq_drop(sch); sfq_drop(sch);
return NET_XMIT_CN; return NET_XMIT_CN;
...@@ -445,6 +443,7 @@ sfq_dequeue(struct Qdisc *sch) ...@@ -445,6 +443,7 @@ sfq_dequeue(struct Qdisc *sch)
} }
skb = slot_dequeue_head(slot); skb = slot_dequeue_head(slot);
sfq_dec(q, a); sfq_dec(q, a);
qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb); sch->qstats.backlog -= qdisc_pkt_len(skb);
......
...@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
} }
sch->q.qlen++; sch->q.qlen++;
qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) ...@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->ptokens = ptoks; q->ptokens = ptoks;
sch->q.qlen--; sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED; sch->flags &= ~TCQ_F_THROTTLED;
qdisc_bstats_update(sch, skb);
return skb; return skb;
} }
......
...@@ -87,7 +87,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -87,7 +87,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (q->q.qlen < dev->tx_queue_len) { if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb); __skb_queue_tail(&q->q, skb);
qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -111,6 +110,8 @@ teql_dequeue(struct Qdisc* sch) ...@@ -111,6 +110,8 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch; dat->m->slaves = sch;
netif_wake_queue(m); netif_wake_queue(m);
} }
} else {
qdisc_bstats_update(sch, skb);
} }
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb; return skb;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment