Commit b38c7eef authored by Thomas Graf, committed by Thomas Graf

[PKT_SCHED]: GRED: Support ECN marking

Adds a new u8 flags field in an unused padding area of the netlink
message. Adds ECN marking support to be used instead of dropping
packets immediately.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
parent d8f64e19
...@@ -220,8 +220,8 @@ struct tc_gred_sopt ...@@ -220,8 +220,8 @@ struct tc_gred_sopt
__u32 DPs; __u32 DPs;
__u32 def_DP; __u32 def_DP;
__u8 grio; __u8 grio;
__u8 pad1; __u8 flags;
__u16 pad2; __u16 pad1;
}; };
/* HTB section */ /* HTB section */
......
...@@ -55,6 +55,7 @@ struct gred_sched ...@@ -55,6 +55,7 @@ struct gred_sched
{ {
struct gred_sched_data *tab[MAX_DPs]; struct gred_sched_data *tab[MAX_DPs];
unsigned long flags; unsigned long flags;
u32 red_flags;
u32 DPs; u32 DPs;
u32 def; u32 def;
struct red_parms wred_set; struct red_parms wred_set;
...@@ -140,6 +141,11 @@ static inline void gred_store_wred_set(struct gred_sched *table, ...@@ -140,6 +141,11 @@ static inline void gred_store_wred_set(struct gred_sched *table,
table->wred_set.qavg = q->parms.qavg; table->wred_set.qavg = q->parms.qavg;
} }
/* Non-zero when ECN marking is enabled for this GRED qdisc (TC_RED_ECN
 * set in red_flags, copied from the userspace tc_gred_sopt.flags field).
 * Callers treat the result as a boolean: mark CE instead of dropping.
 */
static inline int gred_use_ecn(struct gred_sched *t)
{
return t->red_flags & TC_RED_ECN;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{ {
struct gred_sched_data *q=NULL; struct gred_sched_data *q=NULL;
...@@ -198,14 +204,23 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -198,14 +204,23 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
case RED_PROB_MARK: case RED_PROB_MARK:
sch->qstats.overlimits++; sch->qstats.overlimits++;
if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++; q->stats.prob_drop++;
goto congestion_drop; goto congestion_drop;
}
q->stats.prob_mark++;
break;
case RED_HARD_MARK: case RED_HARD_MARK:
sch->qstats.overlimits++; sch->qstats.overlimits++;
if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
q->stats.forced_drop++; q->stats.forced_drop++;
goto congestion_drop; goto congestion_drop;
} }
q->stats.forced_mark++;
break;
}
if (q->backlog + skb->len <= q->limit) { if (q->backlog + skb->len <= q->limit) {
q->backlog += skb->len; q->backlog += skb->len;
...@@ -348,6 +363,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps) ...@@ -348,6 +363,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
sch_tree_lock(sch); sch_tree_lock(sch);
table->DPs = sopt->DPs; table->DPs = sopt->DPs;
table->def = sopt->def_DP; table->def = sopt->def_DP;
table->red_flags = sopt->flags;
/* /*
* Every entry point to GRED is synchronized with the above code * Every entry point to GRED is synchronized with the above code
...@@ -489,6 +505,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -489,6 +505,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
.DPs = table->DPs, .DPs = table->DPs,
.def_DP = table->def, .def_DP = table->def,
.grio = gred_rio_mode(table), .grio = gred_rio_mode(table),
.flags = table->red_flags,
}; };
opts = RTA_NEST(skb, TCA_OPTIONS); opts = RTA_NEST(skb, TCA_OPTIONS);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment