Commit 363437f4 authored by Eric Dumazet, committed by David S. Miller

net_sched: sfb: optimize enqueue on full queue

In case the SFB queue is full (hard limit reached), there is no point
spending time computing the hash and the maximum qlen/p_mark.

We instead just drop the packet early.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 18cf1248
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	u32 r, slot, salt, sfbhash;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
+	if (unlikely(sch->q.qlen >= q->limit)) {
+		sch->qstats.overlimits++;
+		q->stats.queuedrop++;
+		goto drop;
+	}
+
 	if (q->rehash_interval > 0) {
 		unsigned long limit = q->rehash_time + q->rehash_interval;
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	slot ^= 1;
 	sfb_skb_cb(skb)->hashes[slot] = 0;
 
-	if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+	if (unlikely(minqlen >= q->max)) {
 		sch->qstats.overlimits++;
-		if (minqlen >= q->max)
-			q->stats.bucketdrop++;
-		else
-			q->stats.queuedrop++;
+		q->stats.bucketdrop++;
 		goto drop;
 	}
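Putting the two hunks together, the enqueue path after this patch reads roughly as below. This is a sketch assembled from the diff above for readability, not a buildable excerpt: declarations, the rehash handling, hashing and per-bucket bookkeeping are elided, and the parts the patch does not touch (such as the drop label) are paraphrased.

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	u32 minqlen = ~0;

	/* New fast path: if the hard limit is already reached, drop right
	 * away, before hashing the packet or walking the SFB buckets.
	 */
	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	/* ... rehash handling, hash computation, bucket scan that updates
	 * minqlen and p_mark (elided) ...
	 */

	/* With the queue-limit case handled above, bucket overflow is the
	 * only overlimit condition left here, so the old
	 * queuedrop-vs-bucketdrop branch collapses to a single counter.
	 */
	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	/* ... marking decision and enqueue to the child qdisc (elided) ... */

drop:
	/* drop accounting and return, unchanged by this patch */
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}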