Commit aebe4426 authored by Petr Machata, committed by David S. Miller

net: sched: Pass root lock to Qdisc_ops.enqueue

A following patch introduces qevents, points in the qdisc algorithm where a
packet can be processed by user-defined filters. Should this processing lead
to a situation where a new packet is to be enqueued on the same port, holding
the root lock would lead to deadlocks. To solve the issue, the qevent handler
needs to unlock and relock the root lock when necessary.

To that end, add the root lock argument to the qdisc op enqueue, and
propagate throughout.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e701e49
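
For context, passing the root lock down into enqueue is what lets code running under that lock (such as the qevent handlers added by the follow-up patch) temporarily release it before doing work that might enqueue another packet on the same port. The sketch below illustrates only that locking pattern; the handler and the redirect helper are hypothetical and are not part of this commit. Note that root_lock may be NULL, mirroring the NULL passed on the TCQ_F_NOLOCK path of __dev_xmit_skb in the diff below.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for an action that may end up enqueueing on the
 * same port again (e.g. a mirred-style redirect).
 */
static int example_redirect(struct sk_buff *skb)
{
	consume_skb(skb);
	return 0;
}

/* Illustrative sketch only: drop and retake the qdisc root lock around
 * work that could otherwise recurse into enqueue on the same port and
 * deadlock. root_lock is NULL for lockless (TCQ_F_NOLOCK) qdiscs.
 */
static int example_qevent_handler(struct sk_buff *skb, spinlock_t *root_lock)
{
	int ret;

	if (root_lock)
		spin_unlock(root_lock);

	ret = example_redirect(skb);

	if (root_lock)
		spin_lock(root_lock);

	return ret;
}
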
include/net/sch_generic.h

@@ -57,6 +57,7 @@ struct qdisc_skb_head {
 struct Qdisc {
 	int			(*enqueue)(struct sk_buff *skb,
 					   struct Qdisc *sch,
+					   spinlock_t *root_lock,
 					   struct sk_buff **to_free);
 	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
 	unsigned int		flags;
@@ -241,6 +242,7 @@ struct Qdisc_ops {
 	int			(*enqueue)(struct sk_buff *skb,
 					   struct Qdisc *sch,
+					   spinlock_t *root_lock,
 					   struct sk_buff **to_free);
 	struct sk_buff *	(*dequeue)(struct Qdisc *);
 	struct sk_buff *	(*peek)(struct Qdisc *);
@@ -788,11 +790,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
 #endif
 }

-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 				struct sk_buff **to_free)
 {
 	qdisc_calculate_pkt_len(skb, sch);
-	return sch->enqueue(skb, sch, to_free);
+	return sch->enqueue(skb, sch, root_lock, to_free);
 }

 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,

net/core/dev.c

@@ -3749,7 +3749,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	qdisc_calculate_pkt_len(skb, q);

 	if (q->flags & TCQ_F_NOLOCK) {
-		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+		rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK;
 		qdisc_run(q);

 		if (unlikely(to_free))
@@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
-		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+		rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);

net/sched/sch_atm.c

@@ -374,7 +374,7 @@ static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,

 /* --------------------------- Qdisc operations ---------------------------- */

-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			  struct sk_buff **to_free)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -432,7 +432,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 #endif
 	}

-	ret = qdisc_enqueue(skb, flow->q, to_free);
+	ret = qdisc_enqueue(skb, flow->q, root_lock, to_free);
 	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		if (net_xmit_drop_count(ret)) {

net/sched/sch_blackhole.c

@@ -13,7 +13,7 @@
 #include <linux/skbuff.h>
 #include <net/pkt_sched.h>

-static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			     struct sk_buff **to_free)
 {
 	qdisc_drop(skb, sch, to_free);

net/sched/sch_cake.c

@@ -1687,7 +1687,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,

 static void cake_reconfigure(struct Qdisc *sch);

-static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			struct sk_buff **to_free)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);

net/sched/sch_cbq.c

@@ -356,7 +356,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 }

 static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 	    struct sk_buff **to_free)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -373,7 +373,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return ret;
 	}

-	ret = qdisc_enqueue(skb, cl->q, to_free);
+	ret = qdisc_enqueue(skb, cl->q, root_lock, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		cbq_mark_toplevel(q, cl);

net/sched/sch_cbs.c

@@ -77,7 +77,7 @@ struct cbs_sched_data {
 	s64 sendslope; /* in bytes/s */
 	s64 idleslope; /* in bytes/s */
 	struct qdisc_watchdog watchdog;
-	int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
+	int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free);
 	struct sk_buff *(*dequeue)(struct Qdisc *sch);
 	struct Qdisc *qdisc;
@@ -85,13 +85,13 @@ struct cbs_sched_data {
 };

 static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
-			     struct Qdisc *child,
+			     struct Qdisc *child, spinlock_t *root_lock,
 			     struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb);
 	int err;

-	err = child->ops->enqueue(skb, child, to_free);
+	err = child->ops->enqueue(skb, child, root_lock, to_free);
 	if (err != NET_XMIT_SUCCESS)
 		return err;
@@ -101,16 +101,16 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_SUCCESS;
 }

-static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
+static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			       struct sk_buff **to_free)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *qdisc = q->qdisc;

-	return cbs_child_enqueue(skb, sch, qdisc, to_free);
+	return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free);
 }

-static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
+static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			    struct sk_buff **to_free)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);
@@ -124,15 +124,15 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
 		q->last = ktime_get_ns();
 	}

-	return cbs_child_enqueue(skb, sch, qdisc, to_free);
+	return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free);
 }

-static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);

-	return q->enqueue(skb, sch, to_free);
+	return q->enqueue(skb, sch, root_lock, to_free);
 }

 /* timediff is in ns, slope is in bytes/s */

net/sched/sch_choke.c

@@ -210,7 +210,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
 	return choke_match_flow(oskb, nskb);
 }

-static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			 struct sk_buff **to_free)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);

net/sched/sch_codel.c

@@ -108,7 +108,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 	return skb;
 }

-static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			       struct sk_buff **to_free)
 {
 	struct codel_sched_data *q;

net/sched/sch_drr.c

@@ -337,7 +337,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 	return NULL;
 }

-static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb);
@@ -355,7 +355,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}

 	first = !cl->qdisc->q.qlen;
-	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+	err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;

net/sched/sch_dsmark.c

@@ -198,7 +198,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,

 /* --------------------------- Qdisc operations ---------------------------- */

-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			  struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb);
@@ -267,7 +267,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		}
 	}

-	err = qdisc_enqueue(skb, p->q, to_free);
+	err = qdisc_enqueue(skb, p->q, root_lock, to_free);
 	if (err != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(err))
 			qdisc_qstats_drop(sch);

net/sched/sch_etf.c

@@ -160,7 +160,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
 }

 static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
-				      struct sk_buff **to_free)
+				      spinlock_t *root_lock, struct sk_buff **to_free)
 {
 	struct etf_sched_data *q = qdisc_priv(sch);
 	struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;

net/sched/sch_ets.c

@@ -415,7 +415,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
 	return &q->classes[band];
 }

-static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			     struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb);
@@ -433,7 +433,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}

 	first = !cl->qdisc->q.qlen;
-	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+	err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;

net/sched/sch_fifo.c

@@ -16,7 +16,7 @@

 /* 1 band FIFO pseudo-"scheduler" */

-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			 struct sk_buff **to_free)
 {
 	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
@@ -25,7 +25,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return qdisc_drop(skb, sch, to_free);
 }

-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			 struct sk_buff **to_free)
 {
 	if (likely(sch->q.qlen < sch->limit))
@@ -34,7 +34,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return qdisc_drop(skb, sch, to_free);
 }

-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			      struct sk_buff **to_free)
 {
 	unsigned int prev_backlog;

net/sched/sch_fq.c

@@ -439,7 +439,7 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
 	return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
 }

-static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		      struct sk_buff **to_free)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);

net/sched/sch_fq_codel.c

@@ -181,7 +181,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
 	return idx;
 }

-static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			    struct sk_buff **to_free)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);

net/sched/sch_fq_pie.c

@@ -125,7 +125,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow,
 	skb->next = NULL;
 }

-static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 				struct sk_buff **to_free)
 {
 	struct fq_pie_sched_data *q = qdisc_priv(sch);

net/sched/sch_generic.c

@@ -520,7 +520,7 @@ EXPORT_SYMBOL(netif_carrier_off);
    cheaper.
  */

-static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock,
 			struct sk_buff **to_free)
 {
 	__qdisc_drop(skb, to_free);
@@ -614,7 +614,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
 	return &priv->q[band];
 }

-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock,
 			      struct sk_buff **to_free)
 {
 	int band = prio2band[skb->priority & TC_PRIO_MAX];

net/sched/sch_gred.c

@@ -161,7 +161,7 @@ static bool gred_per_vq_red_flags_used(struct gred_sched *table)
 	return false;
 }

-static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			struct sk_buff **to_free)
 {
 	struct gred_sched_data *q = NULL;

net/sched/sch_hfsc.c

@@ -1528,8 +1528,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 	return -1;
 }

-static int
-hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+			struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb);
 	struct hfsc_class *cl;
@@ -1545,7 +1545,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 	}

 	first = !cl->qdisc->q.qlen;
-	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+	err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;

net/sched/sch_hhf.c

@@ -368,7 +368,7 @@ static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
 	return bucket - q->buckets;
 }

-static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);

net/sched/sch_htb.c

@@ -576,7 +576,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 	cl->prio_activity = 0;
 }

-static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	int uninitialized_var(ret);
@@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		__qdisc_drop(skb, to_free);
 		return ret;
 #endif
-	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
+	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q, root_lock,
 					to_free)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
 			qdisc_qstats_drop(sch);

net/sched/sch_multiq.c

@@ -57,7 +57,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 }

 static int
-multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 	       struct sk_buff **to_free)
 {
 	struct Qdisc *qdisc;
@@ -74,7 +74,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 #endif

-	ret = qdisc_enqueue(skb, qdisc, to_free);
+	ret = qdisc_enqueue(skb, qdisc, root_lock, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;

net/sched/sch_netem.c

@@ -431,7 +431,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
  * 	NET_XMIT_DROP: queue length didn't change.
  *      NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			 struct sk_buff **to_free)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -480,7 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

 		q->duplicate = 0;
-		rootq->enqueue(skb2, rootq, to_free);
+		rootq->enqueue(skb2, rootq, root_lock, to_free);
 		q->duplicate = dupsave;
 		rc_drop = NET_XMIT_SUCCESS;
 	}
@@ -604,7 +604,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		skb_mark_not_on_list(segs);
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		last_len = segs->len;
-		rc = qdisc_enqueue(segs, sch, to_free);
+		rc = qdisc_enqueue(segs, sch, root_lock, to_free);
 		if (rc != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(rc))
 				qdisc_qstats_drop(sch);
@@ -720,7 +720,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			struct sk_buff *to_free = NULL;
 			int err;

-			err = qdisc_enqueue(skb, q->qdisc, &to_free);
+			err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free);
 			kfree_skb_list(to_free);
 			if (err != NET_XMIT_SUCCESS &&
 			    net_xmit_drop_count(err)) {

net/sched/sch_pie.c

@@ -82,7 +82,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
 }
 EXPORT_SYMBOL_GPL(pie_drop_early);

-static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			     struct sk_buff **to_free)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);

net/sched/sch_plug.c

@@ -84,7 +84,7 @@ struct plug_sched_data {
 	u32 pkts_to_release;
 };

-static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			struct sk_buff **to_free)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);

net/sched/sch_prio.c

@@ -65,8 +65,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	return q->queues[band];
 }

-static int
-prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+			struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb);
 	struct Qdisc *qdisc;
@@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 	}
 #endif

-	ret = qdisc_enqueue(skb, qdisc, to_free);
+	ret = qdisc_enqueue(skb, qdisc, root_lock, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->qstats.backlog += len;
 		sch->q.qlen++;

net/sched/sch_qfq.c

@@ -1194,7 +1194,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 	return agg;
 }

-static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	unsigned int len = qdisc_pkt_len(skb), gso_segs;
@@ -1225,7 +1225,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

 	first = !cl->qdisc->q.qlen;
-	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+	err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
 		if (net_xmit_drop_count(err)) {

net/sched/sch_red.c

@@ -65,7 +65,7 @@ static int red_use_nodrop(struct red_sched_data *q)
 	return q->flags & TC_RED_NODROP;
 }

-static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -118,7 +118,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		break;
 	}

-	ret = qdisc_enqueue(skb, child, to_free);
+	ret = qdisc_enqueue(skb, child, root_lock, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;

net/sched/sch_sfb.c

@@ -276,7 +276,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
 	return false;
 }

-static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
@@ -399,7 +399,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}

 enqueue:
-	ret = qdisc_enqueue(skb, child, to_free);
+	ret = qdisc_enqueue(skb, child, root_lock, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;

net/sched/sch_sfq.c

@@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q)
 }

 static int
-sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned int hash, dropped;

net/sched/sch_skbprio.c

@@ -65,7 +65,7 @@ static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
 	return SKBPRIO_MAX_PRIORITY - 1;
 }

-static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			   struct sk_buff **to_free)
 {
 	const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;

net/sched/sch_taprio.c

@@ -410,7 +410,7 @@ static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
 	return txtime;
 }

-static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 			  struct sk_buff **to_free)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
@@ -435,7 +435,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;

-	return qdisc_enqueue(skb, child, to_free);
+	return qdisc_enqueue(skb, child, root_lock, to_free);
 }

 static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)

net/sched/sch_tbf.c

@@ -187,7 +187,7 @@ static int tbf_offload_dump(struct Qdisc *sch)
 /* GSO packet is too big, segment it so that tbf can transmit
  * each segment in time
  */
-static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
@@ -206,7 +206,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 		skb_mark_not_on_list(segs);
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		len += segs->len;
-		ret = qdisc_enqueue(segs, q->qdisc, to_free);
+		ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				qdisc_qstats_drop(sch);
@@ -221,7 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }

-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
 		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
@@ -231,10 +231,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (qdisc_pkt_len(skb) > q->max_size) {
 		if (skb_is_gso(skb) &&
 		    skb_gso_validate_mac_len(skb, q->max_size))
-			return tbf_segment(skb, sch, to_free);
+			return tbf_segment(skb, sch, root_lock, to_free);
 		return qdisc_drop(skb, sch, to_free);
 	}
-	ret = qdisc_enqueue(skb, q->qdisc, to_free);
+	ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free);
 	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
 			qdisc_qstats_drop(sch);

net/sched/sch_teql.c

@@ -72,8 +72,8 @@ struct teql_sched_data {

 /* "teql*" qdisc routines */

-static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+			struct sk_buff **to_free)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);