Commit 5a95ca41 authored by Felix Fietkau

mt76: keep a set of software tx queues per phy

Allows tracking tx scheduling separately per phy
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 96747a51
...@@ -30,7 +30,7 @@ int mt76_queues_read(struct seq_file *s, void *data) ...@@ -30,7 +30,7 @@ int mt76_queues_read(struct seq_file *s, void *data)
struct mt76_dev *dev = dev_get_drvdata(s->private); struct mt76_dev *dev = dev_get_drvdata(s->private);
int i; int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { for (i = 0; i < __MT_TXQ_MAX; i++) {
struct mt76_sw_queue *q = &dev->q_tx[i]; struct mt76_sw_queue *q = &dev->q_tx[i];
if (!q->q) if (!q->q)
......
...@@ -141,7 +141,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) ...@@ -141,7 +141,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
struct mt76_sw_queue *sq = &dev->q_tx[qid]; struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q; struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry; struct mt76_queue_entry entry;
unsigned int n_swq_queued[4] = {}; unsigned int n_swq_queued[8] = {};
unsigned int n_queued = 0; unsigned int n_queued = 0;
bool wake = false; bool wake = false;
int i, last; int i, last;
...@@ -178,13 +178,21 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) ...@@ -178,13 +178,21 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
spin_lock_bh(&q->lock); spin_lock_bh(&q->lock);
q->queued -= n_queued; q->queued -= n_queued;
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) { for (i = 0; i < 4; i++) {
if (!n_swq_queued[i]) if (!n_swq_queued[i])
continue; continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i]; dev->q_tx[i].swq_queued -= n_swq_queued[i];
} }
/* ext PHY */
for (i = 0; i < 4; i++) {
		if (!n_swq_queued[4 + i])
continue;
dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
}
if (flush) if (flush)
mt76_dma_sync_idx(dev, q); mt76_dma_sync_idx(dev, q);
......
...@@ -412,13 +412,16 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) ...@@ -412,13 +412,16 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
} }
EXPORT_SYMBOL_GPL(mt76_rx); EXPORT_SYMBOL_GPL(mt76_rx);
bool mt76_has_tx_pending(struct mt76_dev *dev) bool mt76_has_tx_pending(struct mt76_phy *phy)
{ {
struct mt76_dev *dev = phy->dev;
struct mt76_queue *q; struct mt76_queue *q;
int i; int i, offset;
offset = __MT_TXQ_MAX * (phy != &dev->phy);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { for (i = 0; i < __MT_TXQ_MAX; i++) {
q = dev->q_tx[i].q; q = dev->q_tx[offset + i].q;
if (q && q->queued) if (q && q->queued)
return true; return true;
} }
...@@ -486,7 +489,7 @@ void mt76_set_channel(struct mt76_phy *phy) ...@@ -486,7 +489,7 @@ void mt76_set_channel(struct mt76_phy *phy)
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5; int timeout = HZ / 5;
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout); wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(dev); mt76_update_survey(dev);
phy->chandef = *chandef; phy->chandef = *chandef;
......
...@@ -498,7 +498,7 @@ struct mt76_dev { ...@@ -498,7 +498,7 @@ struct mt76_dev {
u32 ampdu_ref; u32 ampdu_ref;
struct list_head txwi_cache; struct list_head txwi_cache;
struct mt76_sw_queue q_tx[__MT_TXQ_MAX]; struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX]; struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops; const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4]; int tx_dma_idx[4];
...@@ -752,7 +752,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw, ...@@ -752,7 +752,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
u16 tids, int nframes, u16 tids, int nframes,
enum ieee80211_frame_release_type reason, enum ieee80211_frame_release_type reason,
bool more_data); bool more_data);
bool mt76_has_tx_pending(struct mt76_dev *dev); bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy); void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev); void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx, int mt76_get_survey(struct ieee80211_hw *hw, int idx,
......
...@@ -1426,7 +1426,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) ...@@ -1426,7 +1426,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev); mt7603_pse_client_reset(dev);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true); mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
......
...@@ -476,7 +476,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) ...@@ -476,7 +476,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart) if (restart)
mt76_mcu_restart(dev); mt76_mcu_restart(dev);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true); mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
......
...@@ -872,7 +872,7 @@ void mt76u_stop_tx(struct mt76_dev *dev) ...@@ -872,7 +872,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
struct mt76_queue *q; struct mt76_queue *q;
int i, j, ret; int i, j, ret;
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5); HZ / 5);
if (!ret) { if (!ret) {
dev_err(dev->dev, "timed out waiting for pending tx\n"); dev_err(dev->dev, "timed out waiting for pending tx\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment