Commit 378a2f09 authored by Jarek Poplawski, committed by David S. Miller

net_sched: Add qdisc __NET_XMIT_STOLEN flag

Patrick McHardy <kaber@trash.net> noticed:
"The other problem that affects all qdiscs supporting actions is
TC_ACT_QUEUED/TC_ACT_STOLEN getting mapped to NET_XMIT_SUCCESS
even though the packet is not queued, corrupting upper qdiscs'
qlen counters."

and later explained:
"The reason why it translates it at all seems to be to not increase
the drops counter. Within a single qdisc this could be avoided by
other means easily, upper qdiscs would still increase the counter
when we return anything besides NET_XMIT_SUCCESS though.

This means we need a new NET_XMIT return value to indicate this to
the upper qdiscs. So I'd suggest to introduce NET_XMIT_STOLEN,
return that to upper qdiscs and translate it to NET_XMIT_SUCCESS
in dev_queue_xmit, similar to NET_XMIT_BYPASS."

David Miller <davem@davemloft.net> noticed:
"Maybe these NET_XMIT_* values being passed around should be a set of
bits. They could be composed of base meanings, combined with specific
attributes.

So you could say "NET_XMIT_DROP | __NET_XMIT_NO_DROP_COUNT"

The attributes get masked out by the top-level ->enqueue() caller,
such that the base meanings are the only thing that make their
way up into the stack. If it's only about communication within the
qdisc tree, let's simply code it that way."

This patch attempts to realize these ideas.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6e583ce5
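To make the scheme concrete before the diff: a child qdisc whose filter action steals a packet now returns its base NET_XMIT_* code with the __NET_XMIT_STOLEN attribute bit set, parent qdiscs consult net_xmit_drop_count() before bumping their drop counters, and qdisc_enqueue_root() masks the attribute bits off so only base codes reach the stack. A minimal standalone sketch of the arithmetic (the constants mirror the patch; the userspace test harness around them is purely illustrative):

	/* Standalone illustration of the return-value scheme; constants
	 * are taken from the patch, main() is illustrative only. */
	#include <assert.h>

	#define NET_XMIT_SUCCESS  0
	#define NET_XMIT_DROP     1
	#define NET_XMIT_MASK     0xFFFF	/* strips qdisc-private flags */

	#define __NET_XMIT_STOLEN 0x00010000	/* attribute, not a base code */
	#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)

	int main(void)
	{
		/* A child qdisc whose filter action stole the packet: */
		int ret = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		/* Parents must not account this as a drop... */
		assert(net_xmit_drop_count(ret) == 0);
		/* ...while a plain drop still counts as one. */
		assert(net_xmit_drop_count(NET_XMIT_DROP) == 1);

		/* The top-level caller masks the attribute out, so the stack
		 * only ever sees the base NET_XMIT_* meanings. */
		assert((ret & NET_XMIT_MASK) == NET_XMIT_SUCCESS);
		return 0;
	}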
include/linux/netdevice.h
@@ -64,6 +64,7 @@ struct wireless_dev;
 #define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
include/net/sch_generic.h
@@ -343,6 +343,18 @@ static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
 	return qdisc_skb_cb(skb)->pkt_len;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+/* additional qdisc xmit flags */
+enum net_xmit_qdisc_t {
+	__NET_XMIT_STOLEN = 0x00010000,
+};
+
+#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
+
+#else
+#define net_xmit_drop_count(e)	(1)
+#endif
+
 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
@@ -355,7 +367,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 {
 	qdisc_skb_cb(skb)->pkt_len = skb->len;
-	return qdisc_enqueue(skb, sch);
+	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
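All of the per-qdisc hunks that follow apply the same two-part pattern: classify handlers OR __NET_XMIT_STOLEN into the TC_ACT_QUEUED/TC_ACT_STOLEN result, and enqueue/requeue paths guard their drop counters with net_xmit_drop_count(). Schematically (a hedged sketch of the common shape, not any one qdisc verbatim; the example_* names are hypothetical):

	/* Common shape of the per-qdisc enqueue changes below; example_*
	 * names are hypothetical, sch/cl/qstats follow struct Qdisc use. */
	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		struct example_class *cl = example_classify(skb, sch);	/* hypothetical */
		int ret;

		ret = qdisc_enqueue(skb, cl->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			/* A stolen/queued packet is not a drop: leave counters alone. */
			if (net_xmit_drop_count(ret)) {
				cl->qstats.drops++;
				sch->qstats.drops++;
			}
			return ret;	/* the attribute bit propagates to the parent */
		}
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}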
net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			kfree_skb(skb);
 			goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
net/sched/sch_cbq.c
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		case TC_ACT_RECLASSIFY:
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,6 +684,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 			cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,6 +254,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
@@ -321,6 +322,7 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
net/sched/sch_hfsc.c
@@ -1166,7 +1166,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
net/sched/sch_htb.c
@@ -221,7 +221,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	switch (result) {
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
@@ -572,9 +572,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -615,10 +617,12 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
 		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
net/sched/sch_netem.c
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
net/sched/sch_prio.c
@@ -45,7 +45,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -88,6 +88,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
@@ -114,6 +115,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
net/sched/sch_sfq.c
@@ -178,7 +178,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 		switch (result) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return 0;
 		}
net/sched/sch_tbf.c
@@ -135,6 +135,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}