Commit e43ac79a authored by Eric Dumazet, committed by David S. Miller

sch_tbf: segment too big GSO packets

If a GSO packet has a length above the tbf burst limit, the packet
is currently silently dropped.

The current ways to handle this are to disable GSO/TSO on the device, or to
configure a large burst, both of which are suboptimal.
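
For reference, a minimal sketch of those two workarounds, assuming a hypothetical
interface eth0 and illustrative rate/burst values:

    # Turn off GSO/TSO on the device so no oversized packets reach tbf:
    ethtool -K eth0 gso off tso off

    # ...or give tbf a burst large enough to cover a full GSO packet (up to 64KB):
    tc qdisc add dev eth0 root tbf rate 10mbit burst 64kb latency 50ms

Both approaches give up either offload performance or shaping accuracy, which is
the motivation for segmenting in the qdisc instead.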

We can instead segment too-big GSO packets and send the individual
segments as the tbf parameters allow, which also gives better interoperability.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ffed61e6
@@ -116,14 +116,57 @@ struct tbf_sched_data {
 	struct qdisc_watchdog watchdog;	/* Watchdog timer */
 };
 
+/* GSO packet is too big, segment it so that tbf can transmit
+ * each segment in time
+ */
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct tbf_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *segs, *nskb;
+	netdev_features_t features = netif_skb_features(skb);
+	int ret, nb;
+
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+	if (IS_ERR_OR_NULL(segs))
+		return qdisc_reshape_fail(skb, sch);
+
+	nb = 0;
+	while (segs) {
+		nskb = segs->next;
+		segs->next = NULL;
+		if (likely(segs->len <= q->max_size)) {
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			ret = qdisc_enqueue(segs, q->qdisc);
+		} else {
+			ret = qdisc_reshape_fail(skb, sch);
+		}
+		if (ret != NET_XMIT_SUCCESS) {
+			if (net_xmit_drop_count(ret))
+				sch->qstats.drops++;
+		} else {
+			nb++;
+		}
+		segs = nskb;
+	}
+	sch->q.qlen += nb;
+	if (nb > 1)
+		qdisc_tree_decrease_qlen(sch, 1 - nb);
+	consume_skb(skb);
+	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if (qdisc_pkt_len(skb) > q->max_size)
+	if (qdisc_pkt_len(skb) > q->max_size) {
+		if (skb_is_gso(skb))
+			return tbf_segment(skb, sch);
 		return qdisc_reshape_fail(skb, sch);
+	}
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
...
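
With this change, GSO can stay enabled and the burst can be sized for the shaping
goal rather than for the largest possible GSO packet: an oversized GSO skb is split
and its segments are enqueued individually. A hypothetical setup where the new
segmentation path would be exercised (interface name and values illustrative):

    # burst (3kb) is smaller than a 64KB GSO packet but larger than one MTU-sized segment
    tc qdisc add dev eth0 root tbf rate 1mbit burst 3kb latency 50ms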