Commit 2af8cfac authored by David S. Miller

Merge branch 'fq_codel-small-optimizations'

Dave Taht says:

====================
Two small fq_codel optimizations

These two patches improve fq_codel performance
under extreme network loads. The first patch
more rapidly escalates the codel count under
overload, the second just kills a totally useless
statistic.

(sent together because they'd otherwise conflict)
====================
Signed-off-by: Dave Taht <dave.taht@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b8fb6406 77ddaff2
@@ -45,7 +45,6 @@ struct fq_codel_flow {
 	struct sk_buff	  *tail;
 	struct list_head  flowchain;
 	int		  deficit;
-	u32		  dropped; /* number of drops (or ECN marks) on this flow */
 	struct codel_vars cvars;
 }; /* please try to keep this structure <= 64 bytes */
@@ -173,7 +172,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
 		__qdisc_drop(skb, to_free);
 	} while (++i < max_packets && len < threshold);
 
-	flow->dropped += i;
+	/* Tell codel to increase its signal strength also */
+	flow->cvars.count += i;
 	q->backlogs[idx] -= len;
 	q->memory_usage -= mem;
 	sch->qstats.drops += i;
@@ -211,7 +211,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		list_add_tail(&flow->flowchain, &q->new_flows);
 		q->new_flow_count++;
 		flow->deficit = q->quantum;
-		flow->dropped = 0;
 	}
 	get_codel_cb(skb)->mem_usage = skb->truesize;
 	q->memory_usage += get_codel_cb(skb)->mem_usage;
@@ -310,9 +309,6 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
 			    &flow->cvars, &q->cstats, qdisc_pkt_len,
 			    codel_get_enqueue_time, drop_func, dequeue_func);
 
-	flow->dropped += q->cstats.drop_count - prev_drop_count;
-	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
-
 	if (!skb) {
 		/* force a pass through old_flows to prevent starvation */
 		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
@@ -658,7 +654,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 			sch_tree_unlock(sch);
 		}
 		qs.backlog = q->backlogs[idx];
-		qs.drops = flow->dropped;
+		qs.drops = 0;
 	}
 	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment