Commit f9eb8aea authored by Eric Dumazet, committed by David S. Miller

net_sched: transform qdisc running bit into a seqcount

Instead of using a single bit (__QDISC___STATE_RUNNING)
in sch->__state, use a seqcount.

This adds lockdep support, but more importantly it will allow us
to sample qdisc/class statistics without having to grab qdisc root lock.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 64151ae3
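
What the seqcount buys can be sketched outside the kernel: once the running state is a sequence counter, a statistics reader can detect a concurrent writer and retry instead of grabbing the qdisc root lock (the in-kernel lockless stats dump itself is left to follow-up work, per "will allow us" above). The following is a minimal user-space model of that read protocol, not kernel code: C11 atomics stand in for the seqcount API, memory ordering is simplified to seq_cst, and all names (struct stats, stats_update, stats_snapshot) are hypothetical.

/* User-space model of a seqcount-protected stats read -- NOT the
 * kernel implementation, only the odd/even retry protocol it uses.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;	/* even: no writer; odd: update in progress */
	uint64_t bytes;
	uint64_t packets;
};

/* Writer: in the kernel this is the one CPU owning the qdisc run loop. */
static void stats_update(struct stats *s, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);	/* even -> odd: readers will retry */
	s->bytes += len;
	s->packets++;
	atomic_fetch_add(&s->seq, 1);	/* odd -> even: snapshot valid again */
}

/* Reader: consistent snapshot with no lock, only a retry loop. */
static void stats_snapshot(struct stats *s, uint64_t *bytes, uint64_t *packets)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*bytes = s->bytes;
		*packets = s->packets;
	} while ((start & 1) || atomic_load(&s->seq) != start);
}

int main(void)
{
	struct stats s = { .seq = 0 };
	uint64_t b, p;

	stats_update(&s, 1500);
	stats_snapshot(&s, &b, &p);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}
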
@@ -4610,6 +4610,7 @@ static int bond_check_params(struct bond_params *params)
 static struct lock_class_key bonding_netdev_xmit_lock_key;
 static struct lock_class_key bonding_netdev_addr_lock_key;
 static struct lock_class_key bonding_tx_busylock_key;
+static struct lock_class_key bonding_qdisc_running_key;
 static void bond_set_lockdep_class_one(struct net_device *dev,
 				       struct netdev_queue *txq,
@@ -4625,6 +4626,7 @@ static void bond_set_lockdep_class(struct net_device *dev)
 			    &bonding_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
 	dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
+	dev->qdisc_running_key = &bonding_qdisc_running_key;
 }
 /* Called from registration process */
......
@@ -1313,9 +1313,12 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 }
 static struct lock_class_key ppp_tx_busylock;
+static struct lock_class_key ppp_qdisc_running_key;
 static int ppp_dev_init(struct net_device *dev)
 {
 	dev->qdisc_tx_busylock = &ppp_tx_busylock;
+	dev->qdisc_running_key = &ppp_qdisc_running_key;
 	return 0;
 }
......
......@@ -1577,6 +1577,7 @@ static const struct team_option team_options[] = {
static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;
static struct lock_class_key team_qdisc_running_key;
static void team_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
@@ -1590,6 +1591,7 @@ static void team_set_lockdep_class(struct net_device *dev)
 	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
 	dev->qdisc_tx_busylock = &team_tx_busylock_key;
+	dev->qdisc_running_key = &team_qdisc_running_key;
 }
 static int team_init(struct net_device *dev)
......
@@ -1862,6 +1862,7 @@ struct net_device {
 #endif
 	struct phy_device	*phydev;
 	struct lock_class_key	*qdisc_tx_busylock;
+	struct lock_class_key	*qdisc_running_key;
 	bool			proto_down;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
......
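The new field mirrors qdisc_tx_busylock: each stacked or virtual device type supplies its own lock_class_key, so lockdep gives that type's running seqcount a distinct class and does not report false lock recursion when qdiscs nest (bonding over team, PPP over Ethernet, and so on). Condensed, the per-driver pattern repeated throughout this patch looks as follows; the "foo" driver is hypothetical, mirroring the ppp and l2tp hunks, and this is only a sketch that is meaningful inside a kernel tree.

#include <linux/netdevice.h>

/* One static key per device type = one lockdep class per device type. */
static struct lock_class_key foo_tx_busylock_key;
static struct lock_class_key foo_qdisc_running_key;

static int foo_dev_init(struct net_device *dev)
{
	/* Must be set before qdisc_alloc() picks the keys up. */
	dev->qdisc_tx_busylock = &foo_tx_busylock_key;
	dev->qdisc_running_key = &foo_qdisc_running_key;
	return 0;
}
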
@@ -29,13 +29,6 @@ enum qdisc_state_t {
 	__QDISC_STATE_THROTTLED,
 };
-/*
- * following bits are only changed while qdisc lock is held
- */
-enum qdisc___state_t {
-	__QDISC___STATE_RUNNING = 1,
-};
 struct qdisc_size_table {
 	struct rcu_head		rcu;
 	struct list_head	list;
@@ -93,7 +86,7 @@ struct Qdisc {
 	unsigned long		state;
 	struct sk_buff_head	q;
 	struct gnet_stats_basic_packed bstats;
-	unsigned int		__state;
+	seqcount_t		running;
 	struct gnet_stats_queue	qstats;
 	struct rcu_head		rcu_head;
 	int			padded;
@@ -104,20 +97,20 @@
 static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
+	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
 	if (qdisc_is_running(qdisc))
 		return false;
-	qdisc->__state |= __QDISC___STATE_RUNNING;
+	write_seqcount_begin(&qdisc->running);
 	return true;
 }
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-	qdisc->__state &= ~__QDISC___STATE_RUNNING;
+	write_seqcount_end(&qdisc->running);
 }
 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
......
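The low bit of the sequence now carries the old flag's meaning: write_seqcount_begin() moves the count to an odd value and write_seqcount_end() back to even, so raw_read_seqcount(&qdisc->running) & 1 is exactly the old __QDISC___STATE_RUNNING test. The unlocked test-then-begin in qdisc_run_begin() stays race-free because every caller holds the qdisc root lock. Below is a single-threaded user-space model of the three inlines, a sketch only; the model_* names are hypothetical and a plain counter stands in for the seqcount.

#include <assert.h>

static unsigned int running_seq;	/* models qdisc->running */

static int model_is_running(void)
{
	return running_seq & 1;		/* odd sequence = writer active */
}

static int model_run_begin(void)
{
	if (model_is_running())
		return 0;	/* another CPU already owns the qdisc */
	running_seq++;		/* even -> odd, like write_seqcount_begin() */
	return 1;
}

static void model_run_end(void)
{
	running_seq++;		/* odd -> even, like write_seqcount_end() */
}

int main(void)
{
	assert(!model_is_running());
	assert(model_run_begin());	/* acquired: sequence is now odd */
	assert(model_is_running());
	assert(!model_run_begin());	/* a second begin fails while running */
	model_run_end();		/* released: sequence even again */
	assert(!model_is_running());
	return 0;
}
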
@@ -629,6 +629,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 static struct lock_class_key bt_tx_busylock;
 static struct lock_class_key bt_netdev_xmit_lock_key;
+static struct lock_class_key bt_qdisc_running_key;
 static void bt_set_lockdep_class_one(struct net_device *dev,
 				     struct netdev_queue *txq,
@@ -641,6 +642,7 @@ static int bt_dev_init(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL);
 	dev->qdisc_tx_busylock = &bt_tx_busylock;
+	dev->qdisc_running_key = &bt_qdisc_running_key;
 	return 0;
 }
......
@@ -3075,7 +3075,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
-	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
+	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
 	 */
 	contended = qdisc_is_running(q);
......
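The heuristic in the hunk above can also be modeled in user space: an enqueuer that sees the qdisc already running funnels through a second lock first, so the CPU driving the transmit loop reacquires the root lock against at most one rival instead of every waiting sender. A sketch with POSIX threads follows; the names and structure are illustrative only, since the kernel uses spinlocks and qdisc_is_running() rather than pthread mutexes.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t busylock = PTHREAD_MUTEX_INITIALIZER;
static bool qdisc_running;	/* stand-in for qdisc_is_running(q) */

static void enqueue_one(void)
{
	/* Snapshot once, as __dev_xmit_skb() does. */
	bool contended = qdisc_running;

	if (contended)
		pthread_mutex_lock(&busylock);	/* serialize rivals here */

	pthread_mutex_lock(&root_lock);
	/* ... enqueue the packet; maybe become the running owner ... */
	pthread_mutex_unlock(&root_lock);

	if (contended)
		pthread_mutex_unlock(&busylock);
}

static void *sender(void *arg)
{
	(void)arg;
	enqueue_one();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sender, NULL);
	pthread_create(&b, NULL, sender, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
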
......@@ -60,6 +60,7 @@ static struct header_ops lowpan_header_ops = {
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;
static struct lock_class_key lowpan_qdisc_running_key;
static void lowpan_set_lockdep_class_one(struct net_device *ldev,
struct netdev_queue *txq,
@@ -73,6 +74,8 @@ static int lowpan_dev_init(struct net_device *ldev)
 {
 	netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
 	ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	ldev->qdisc_running_key = &lowpan_qdisc_running_key;
 	return 0;
 }
......
......@@ -68,6 +68,8 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
}
static struct lock_class_key l2tp_eth_tx_busylock;
static struct lock_class_key l2tp_qdisc_running_key;
static int l2tp_eth_dev_init(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
@@ -76,6 +78,8 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	eth_broadcast_addr(dev->broadcast);
 	dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
+	dev->qdisc_running_key = &l2tp_qdisc_running_key;
 	return 0;
 }
......
@@ -110,7 +110,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 /*
  * Transmit possibly several skbs, and handle the return status as
- * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
+ * required. Owning running seqcount bit guarantees that
  * only one CPU can execute this function.
 *
 * Returns to the caller:
@@ -137,10 +137,10 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
-		spin_lock(root_lock);
+		spin_lock_nested(root_lock, SINGLE_DEPTH_NESTING);
 		return qdisc_qlen(q);
 	}
-	spin_lock(root_lock);
+	spin_lock_nested(root_lock, SINGLE_DEPTH_NESTING);
 	if (dev_xmit_complete(ret)) {
 		/* Driver sent out skb successfully or skb was consumed */
@@ -163,7 +163,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 /*
  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
- * __QDISC___STATE_RUNNING guarantees only one CPU can process
+ * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
@@ -379,6 +379,7 @@ struct Qdisc noop_qdisc = {
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
+	.running	=	SEQCNT_ZERO(noop_qdisc.running),
 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);
......@@ -537,6 +538,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops)
@@ -570,6 +572,10 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	lockdep_set_class(&sch->busylock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+	seqcount_init(&sch->running);
+	lockdep_set_class(&sch->running,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
......