Commit 520ac30f authored by Eric Dumazet, committed by David S. Miller

net_sched: drop packets after root qdisc lock is released

Qdisc performance suffers when packets are dropped at enqueue()
time because drops (kfree_skb()) are done while qdisc lock is held,
delaying a dequeue() draining the queue.

Nominal throughput can be reduced by 50% when this happens,
at a time when we would like dequeue() to proceed as fast as possible.

Even FQ is vulnerable to this problem, although one of FQ's goals
was to provide some flow isolation.

This patch adds a 'struct sk_buff **to_free' parameter to all
qdisc->enqueue() handlers and to the qdisc_drop() helper.

I measured a performance increase of up to 12%, but this patch
is a prereq so that future batches in enqueue() can fly.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 36195d86
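The mechanism is simple: instead of freeing under the lock, an enqueue handler chains every dropped skb onto a caller-provided to_free list through the skb's intrusive next pointer, and the caller frees the whole chain only after the root lock has been released. Below is a minimal, self-contained user-space C sketch of that pattern; struct pkt, drop_deferred() and free_deferred() are illustrative stand-ins for sk_buff, __qdisc_drop() and kfree_skb_list(), not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct sk_buff: only the intrusive next pointer matters. */
struct pkt {
	int id;
	struct pkt *next;
};

/* Counterpart of __qdisc_drop(): O(1), no allocation, cheap under a lock. */
static void drop_deferred(struct pkt *p, struct pkt **to_free)
{
	p->next = *to_free;
	*to_free = p;
}

/* Counterpart of kfree_skb_list(): pay the freeing cost in one batch. */
static void free_deferred(struct pkt *to_free)
{
	while (to_free) {
		struct pkt *next = to_free->next;

		printf("freeing packet %d after unlock\n", to_free->id);
		free(to_free);
		to_free = next;
	}
}

int main(void)
{
	struct pkt *to_free = NULL;
	int i;

	/* spin_lock(root_lock); -- drops are cheap while "locked" */
	for (i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			break;
		p->id = i;
		p->next = NULL;
		drop_deferred(p, &to_free);
	}
	/* spin_unlock(root_lock); -- only now walk and free the chain */
	free_deferred(to_free);
	return 0;
}

In the patch itself, __dev_xmit_skb() owns the to_free list and calls kfree_skb_list() immediately after spin_unlock(root_lock), so a concurrent dequeue() is never stalled behind kfree_skb() work.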
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -37,8 +37,10 @@ struct qdisc_size_table {
 };
 
 struct Qdisc {
-	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
-	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
+	int			(*enqueue)(struct sk_buff *skb,
+					   struct Qdisc *sch,
+					   struct sk_buff **to_free);
+	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
 	unsigned int		flags;
 #define TCQ_F_BUILTIN		1
 #define TCQ_F_INGRESS		2
@@ -160,7 +162,9 @@ struct Qdisc_ops {
 	char			id[IFNAMSIZ];
 	int			priv_size;
 
-	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
+	int			(*enqueue)(struct sk_buff *skb,
+					   struct Qdisc *sch,
+					   struct sk_buff **to_free);
 	struct sk_buff *	(*dequeue)(struct Qdisc *);
 	struct sk_buff *	(*peek)(struct Qdisc *);
@@ -498,10 +502,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
 #endif
 }
 
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+				struct sk_buff **to_free)
 {
 	qdisc_calculate_pkt_len(skb, sch);
-	return sch->enqueue(skb, sch);
+	return sch->enqueue(skb, sch, to_free);
 }
 
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
@@ -626,24 +631,36 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 	return __qdisc_dequeue_head(sch, &sch->q);
 }
 
+/* Instead of calling kfree_skb() while root qdisc lock is held,
+ * queue the skb for future freeing at end of __dev_xmit_skb()
+ */
+static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
+{
+	skb->next = *to_free;
+	*to_free = skb;
+}
+
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
-						   struct sk_buff_head *list)
+						   struct sk_buff_head *list,
+						   struct sk_buff **to_free)
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return len;
 	}
 
 	return 0;
 }
 
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
+static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
+						 struct sk_buff **to_free)
 {
-	return __qdisc_queue_drop_head(sch, &sch->q);
+	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
 }
 
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
@@ -724,9 +741,11 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 	qdisc_qstats_drop(sch);
 }
 
-static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
+			     struct sk_buff **to_free)
 {
-	kfree_skb(skb);
+	__qdisc_drop(skb, to_free);
 	qdisc_qstats_drop(sch);
 
 	return NET_XMIT_DROP;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3070,6 +3070,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
+	struct sk_buff *to_free = NULL;
 	bool contended;
 	int rc;
 
@@ -3086,7 +3087,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	spin_lock(root_lock);
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-		kfree_skb(skb);
+		__qdisc_drop(skb, &to_free);
 		rc = NET_XMIT_DROP;
 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
 		   qdisc_run_begin(q)) {
@@ -3109,7 +3110,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		rc = NET_XMIT_SUCCESS;
 	} else {
-		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
+		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -3119,6 +3120,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		}
 	}
 	spin_unlock(root_lock);
+	if (unlikely(to_free))
+		kfree_skb_list(to_free);
 	if (unlikely(contended))
 		spin_unlock(&q->busylock);
 	return rc;
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -357,7 +357,8 @@ static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
 
 /* --------------------------- Qdisc operations ---------------------------- */
 
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow;
@@ -398,10 +399,10 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			kfree_skb(skb);
+			__qdisc_drop(skb, to_free);
 			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
-			kfree_skb(skb);
+			__qdisc_drop(skb, to_free);
 			goto drop;
 		case TC_ACT_RECLASSIFY:
 			if (flow->excess)
@@ -413,7 +414,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	}
 
-	ret = qdisc_enqueue(skb, flow->q);
+	ret = qdisc_enqueue(skb, flow->q, to_free);
 	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		if (net_xmit_drop_count(ret)) {
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -17,9 +17,10 @@
 #include <linux/skbuff.h>
 #include <net/pkt_sched.h>
 
-static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			     struct sk_buff **to_free)
 {
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_SUCCESS;
 }
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -358,7 +358,8 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 }
 
 static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+	    struct sk_buff **to_free)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	int uninitialized_var(ret);
@@ -370,11 +371,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl == NULL) {
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 	}
 
-	ret = qdisc_enqueue(skb, cl->q);
+	ret = qdisc_enqueue(skb, cl->q, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		cbq_mark_toplevel(q, cl);
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -115,7 +115,8 @@ static void choke_zap_tail_holes(struct choke_sched_data *q)
 }
 
 /* Drop packet from queue array by creating a "hole" */
-static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
+			      struct sk_buff **to_free)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb = q->tab[idx];
@@ -129,7 +130,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
 	qdisc_qstats_backlog_dec(sch, skb);
 	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	--sch->q.qlen;
 }
 
@@ -261,7 +262,8 @@ static bool choke_match_random(const struct choke_sched_data *q,
 	return choke_match_flow(oskb, nskb);
 }
 
-static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	struct choke_sched_data *q = qdisc_priv(sch);
@@ -288,7 +290,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		/* Draw a packet at random from queue and compare flow */
 		if (choke_match_random(q, skb, &idx)) {
 			q->stats.matched++;
-			choke_drop_by_idx(sch, idx);
+			choke_drop_by_idx(sch, idx, to_free);
 			goto congestion_drop;
 		}
 
@@ -331,16 +333,16 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	q->stats.pdrop++;
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 
 other_drop:
 	if (ret & __NET_XMIT_BYPASS)
 		qdisc_qstats_drop(sch);
-	kfree_skb(skb);
+	__qdisc_drop(skb, to_free);
 	return ret;
 }
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -82,7 +82,8 @@ static void drop_func(struct sk_buff *skb, void *ctx)
 {
 	struct Qdisc *sch = ctx;
 
-	qdisc_drop(skb, sch);
+	kfree_skb(skb);
+	qdisc_qstats_drop(sch);
 }
 
 static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
@@ -107,7 +108,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 	return skb;
 }
 
-static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			       struct sk_buff **to_free)
 {
 	struct codel_sched_data *q;
 
@@ -117,7 +119,7 @@ static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	q = qdisc_priv(sch);
 	q->drop_overlimit++;
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 
 static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -350,7 +350,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 	return NULL;
 }
 
-static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
@@ -360,11 +361,11 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return err;
 	}
 
-	err = qdisc_enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -191,7 +191,8 @@ static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
 
 /* --------------------------- Qdisc operations ---------------------------- */
 
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	int err;
@@ -234,7 +235,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			kfree_skb(skb);
+			__qdisc_drop(skb, to_free);
 			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
@@ -251,7 +252,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 
-	err = qdisc_enqueue(skb, p->q);
+	err = qdisc_enqueue(skb, p->q, to_free);
 	if (err != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(err))
 			qdisc_qstats_drop(sch);
@@ -264,7 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 
 drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,29 +19,32 @@
 
 /* 1 band FIFO pseudo-"scheduler" */
 
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			      struct sk_buff **to_free)
 {
 	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	/* queue full, remove one skb to fulfill the limit */
-	__qdisc_queue_drop_head(sch, &sch->q);
+	__qdisc_queue_drop_head(sch, &sch->q, to_free);
 	qdisc_qstats_drop(sch);
 	qdisc_enqueue_tail(skb, sch);
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -368,18 +368,19 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
 	}
 }
 
-static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		      struct sk_buff **to_free)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct fq_flow *f;
 
 	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 
 	f = fq_classify(skb, q);
 	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
 		q->stat_flows_plimit++;
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 	}
 
 	f->qlen++;
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -139,7 +139,8 @@ static inline void flow_queue_add(struct fq_codel_flow *flow,
 	skb->next = NULL;
 }
 
-static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
+static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
+				  struct sk_buff **to_free)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
@@ -172,7 +173,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
 		skb = dequeue_head(flow);
 		len += qdisc_pkt_len(skb);
 		mem += skb->truesize;
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 	} while (++i < max_packets && len < threshold);
 
 	flow->dropped += i;
@@ -184,7 +185,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
 	return idx;
 }
 
-static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			    struct sk_buff **to_free)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	unsigned int idx, prev_backlog, prev_qlen;
@@ -197,7 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (idx == 0) {
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 	}
 	idx--;
@@ -229,7 +231,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 * So instead of dropping a single packet, drop half of its backlog
 	 * with a 64 packets limit to not add a too big cpu spike here.
 	 */
-	ret = fq_codel_drop(sch, q->drop_batch_size);
+	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
 
 	prev_qlen -= sch->q.qlen;
 	prev_backlog -= sch->qstats.backlog;
@@ -276,7 +278,8 @@ static void drop_func(struct sk_buff *skb, void *ctx)
 {
 	struct Qdisc *sch = ctx;
 
-	qdisc_drop(skb, sch);
+	kfree_skb(skb);
+	qdisc_qstats_drop(sch);
 }
 
 static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -348,9 +348,10 @@ EXPORT_SYMBOL(netif_carrier_off);
    cheaper.
  */
 
-static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+			struct sk_buff **to_free)
 {
-	kfree_skb(skb);
+	__qdisc_drop(skb, to_free);
 	return NET_XMIT_CN;
 }
 
@@ -439,7 +440,8 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 	return priv->q + band;
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+			      struct sk_buff **to_free)
 {
 	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
 		int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -451,7 +453,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
 
-	return qdisc_drop(skb, qdisc);
+	return qdisc_drop(skb, qdisc, to_free);
 }
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -149,7 +149,8 @@ static inline int gred_use_harddrop(struct gred_sched *t)
 	return t->red_flags & TC_RED_HARDDROP;
 }
 
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			struct sk_buff **to_free)
 {
 	struct gred_sched_data *q = NULL;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -237,10 +238,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	q->stats.pdrop++;
 drop:
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1572,7 +1572,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 }
 
 static int
-hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct hfsc_class *cl;
 	int uninitialized_var(err);
@@ -1581,11 +1581,11 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return err;
 	}
 
-	err = qdisc_enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -345,7 +345,7 @@ static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
-static unsigned int hhf_drop(struct Qdisc *sch)
+static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	struct wdrr_bucket *bucket;
@@ -359,16 +359,16 @@ static unsigned int hhf_drop(struct Qdisc *sch)
 		struct sk_buff *skb = dequeue_head(bucket);
 
 		sch->q.qlen--;
-		qdisc_qstats_drop(sch);
 		qdisc_qstats_backlog_dec(sch, skb);
-		kfree_skb(skb);
+		qdisc_drop(skb, sch, to_free);
 	}
 
 	/* Return id of the bucket from which the packet was dropped. */
 	return bucket - q->buckets;
 }
 
-static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	enum wdrr_bucket_idx idx;
@@ -406,7 +406,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* Return Congestion Notification only if we dropped a packet from this
 	 * bucket.
 	 */
-	if (hhf_drop(sch) == idx)
+	if (hhf_drop(sch, to_free) == idx)
 		return NET_XMIT_CN;
 
 	/* As we dropped a packet, better let upper stack know this. */
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,7 +569,8 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 	list_del_init(&cl->un.leaf.drop_list);
 }
 
-static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	int uninitialized_var(ret);
 	struct htb_sched *q = qdisc_priv(sch);
@@ -581,16 +582,17 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
 		} else {
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 #endif
-	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+					to_free)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
 			qdisc_qstats_drop(sch);
 			cl->qstats.drops++;
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -65,7 +65,8 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 }
 
 static int
-multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+	       struct sk_buff **to_free)
 {
 	struct Qdisc *qdisc;
 	int ret;
@@ -76,12 +77,12 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 	}
 #endif
 
-	ret = qdisc_enqueue(skb, qdisc);
+	ret = qdisc_enqueue(skb, qdisc, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -397,7 +397,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
  * when we statistically choose to corrupt one, we instead segment it, returning
  * the first packet to be corrupted, and re-enqueue the remaining frames
  */
-static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
+				     struct sk_buff **to_free)
 {
 	struct sk_buff *segs;
 	netdev_features_t features = netif_skb_features(skb);
@@ -405,7 +406,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 
 	if (IS_ERR_OR_NULL(segs)) {
-		qdisc_drop(skb, sch);
+		qdisc_drop(skb, sch, to_free);
 		return NULL;
 	}
 	consume_skb(skb);
@@ -418,7 +419,8 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
  * 	NET_XMIT_DROP: queue length didn't change.
  *      NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	/* We don't fill cb now as skb_unshare() may invalidate it */
@@ -443,7 +445,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	if (count == 0) {
 		qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
@@ -463,7 +465,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 
 		q->duplicate = 0;
-		rootq->enqueue(skb2, rootq);
+		rootq->enqueue(skb2, rootq, to_free);
 		q->duplicate = dupsave;
 	}
 
@@ -475,7 +477,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (skb_is_gso(skb)) {
-			segs = netem_segment(skb, sch);
+			segs = netem_segment(skb, sch, to_free);
 			if (!segs)
 				return NET_XMIT_DROP;
 		} else {
@@ -488,7 +490,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
 		     skb_checksum_help(skb))) {
-			rc = qdisc_drop(skb, sch);
+			rc = qdisc_drop(skb, sch, to_free);
 			goto finish_segs;
 		}
 
@@ -497,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 
 	qdisc_qstats_backlog_inc(sch, skb);
 
@@ -557,7 +559,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			segs->next = NULL;
 			qdisc_skb_cb(segs)->pkt_len = segs->len;
 			last_len = segs->len;
-			rc = qdisc_enqueue(segs, sch);
+			rc = qdisc_enqueue(segs, sch, to_free);
 			if (rc != NET_XMIT_SUCCESS) {
 				if (net_xmit_drop_count(rc))
 					qdisc_qstats_drop(sch);
@@ -615,8 +617,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 #endif
 
 		if (q->qdisc) {
-			int err = qdisc_enqueue(skb, q->qdisc);
+			struct sk_buff *to_free = NULL;
+			int err;
 
+			err = qdisc_enqueue(skb, q->qdisc, &to_free);
+			kfree_skb_list(to_free);
 			if (unlikely(err != NET_XMIT_SUCCESS)) {
 				if (net_xmit_drop_count(err)) {
 					qdisc_qstats_drop(sch);
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -134,7 +134,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
 	return false;
 }
 
-static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			     struct sk_buff **to_free)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 	bool enqueue = false;
@@ -166,7 +167,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 out:
 	q->stats.dropped++;
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 
 static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -88,7 +88,8 @@ struct plug_sched_data {
 	u32 pkts_to_release;
 };
 
-static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			struct sk_buff **to_free)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);
 
@@ -98,7 +99,7 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return qdisc_enqueue_tail(skb, sch);
 	}
 
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 
 static struct sk_buff *plug_dequeue(struct Qdisc *sch)
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -67,7 +67,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 }
 
 static int
-prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct Qdisc *qdisc;
 	int ret;
@@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #endif
 
-	ret = qdisc_enqueue(skb, qdisc);
+	ret = qdisc_enqueue(skb, qdisc, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1217,7 +1217,8 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 	return agg;
 }
 
-static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
@@ -1240,11 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 				     qdisc_pkt_len(skb));
 		if (err) {
 			cl->qstats.drops++;
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		}
 	}
 
-	err = qdisc_enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
 		if (net_xmit_drop_count(err)) {
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -56,7 +56,8 @@ static inline int red_use_harddrop(struct red_sched_data *q)
 	return q->flags & TC_RED_HARDDROP;
 }
 
-static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
@@ -95,7 +96,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 	}
 
-	ret = qdisc_enqueue(skb, child);
+	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
@@ -106,7 +107,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -275,7 +275,8 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
 	return false;
 }
 
-static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
@@ -397,7 +398,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 enqueue:
-	ret = qdisc_enqueue(skb, child);
+	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
 		increment_qlen(skb, q);
@@ -408,7 +409,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 
 drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 other_drop:
 	if (ret & __NET_XMIT_BYPASS)
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q)
 }
 
 static int
-sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned int hash, dropped;
@@ -367,7 +367,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (x == SFQ_EMPTY_SLOT) {
 		x = q->dep[0].next; /* get a free slot */
 		if (x >= SFQ_MAX_FLOWS)
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		q->ht[hash] = x;
 		slot = &q->slots[x];
 		slot->hash = hash;
@@ -424,14 +424,14 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (slot->qlen >= q->maxdepth) {
 congestion_drop:
 		if (!sfq_headdrop(q))
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 
 		/* We know we have at least one packet in queue */
 		head = slot_dequeue_head(slot);
 		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
 		sch->qstats.backlog -= delta;
 		slot->backlog -= delta;
-		qdisc_drop(head, sch);
+		qdisc_drop(head, sch, to_free);
 
 		slot_queue_add(slot, skb);
 		return NET_XMIT_CN;
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -155,7 +155,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
 /* GSO packet is too big, segment it so that tbf can transmit
  * each segment in time
  */
-static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *segs, *nskb;
@@ -166,7 +167,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 
 	if (IS_ERR_OR_NULL(segs))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 
 	nb = 0;
 	while (segs) {
@@ -174,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 		segs->next = NULL;
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		len += segs->len;
-		ret = qdisc_enqueue(segs, q->qdisc);
+		ret = qdisc_enqueue(segs, q->qdisc, to_free);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				qdisc_qstats_drop(sch);
@@ -190,17 +191,18 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
 
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
 		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
-			return tbf_segment(skb, sch);
-		return qdisc_drop(skb, sch);
+			return tbf_segment(skb, sch, to_free);
+		return qdisc_drop(skb, sch, to_free);
 	}
-	ret = qdisc_enqueue(skb, q->qdisc);
+	ret = qdisc_enqueue(skb, q->qdisc, to_free);
 	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
 			qdisc_qstats_drop(sch);
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -77,7 +77,7 @@ struct teql_sched_data {
 /* "teql*" qdisc routines */
 
 static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
@@ -87,7 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 	}
 
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 
 static struct sk_buff *