Commit 91990519 authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: move tx hw data queues in mt76_phy

Move the hw data queues from mt76_dev to mt76_phy, since mt7915 supports
per-phy hw queues in DBDC mode.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent e637763b
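
To illustrate the ownership change this patch makes, here is a minimal C sketch (not code from the driver; the member layout, the array size standing in for __MT_TXQ_MAX, and the txq_lookup helper are simplified placeholders): the tx data queue array now lives in mt76_phy rather than mt76_dev, so a DBDC device can give each phy its own rings while single-phy drivers simply alias the primary phy's queues.

/* Simplified sketch of the layout after this patch (illustrative only). */
struct mt76_queue;

struct mt76_phy {
	struct mt76_queue *q_tx[4];	/* stands in for __MT_TXQ_MAX entries */
};

struct mt76_dev {
	struct mt76_phy phy;	/* primary phy, always present */
	struct mt76_phy *phy2;	/* second phy, only set in DBDC mode */
};

/* Tx queues are now resolved through the owning phy, not the device. */
static inline struct mt76_queue *txq_lookup(struct mt76_phy *phy, int qid)
{
	return phy->q_tx[qid];
}
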
@@ -30,8 +30,8 @@ int mt76_queues_read(struct seq_file *s, void *data)
 	struct mt76_dev *dev = dev_get_drvdata(s->private);
 	int i;
-	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
-		struct mt76_queue *q = dev->q_tx[i];
+	for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
+		struct mt76_queue *q = dev->phy.q_tx[i];
 		if (!q)
 			continue;
...
@@ -661,8 +661,11 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 	mt76_worker_disable(&dev->tx_worker);
 	netif_napi_del(&dev->tx_napi);
-	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
-		mt76_dma_tx_cleanup(dev, dev->q_tx[i], true);
+	for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
+		mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
+		if (dev->phy2)
+			mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
+	}
 	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
 		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
...
@@ -539,14 +539,11 @@ EXPORT_SYMBOL_GPL(mt76_rx);
 bool mt76_has_tx_pending(struct mt76_phy *phy)
 {
-	struct mt76_dev *dev = phy->dev;
 	struct mt76_queue *q;
-	int i, offset;
+	int i;
-	offset = __MT_TXQ_MAX * (phy != &dev->phy);
 	for (i = 0; i < __MT_TXQ_MAX; i++) {
-		q = dev->q_tx[offset + i];
+		q = phy->q_tx[i];
 		if (q && q->queued)
 			return true;
 	}
...
@@ -561,6 +561,8 @@ struct mt76_phy {
 	unsigned long state;
+	struct mt76_queue *q_tx[__MT_TXQ_MAX];
 	struct cfg80211_chan_def chandef;
 	struct ieee80211_channel *main_chan;
@@ -607,7 +609,6 @@ struct mt76_dev {
 	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
 	struct list_head txwi_cache;
-	struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
 	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
 	struct mt76_queue q_rx[__MT_RXQ_MAX];
 	const struct mt76_queue_ops *queue_ops;
@@ -797,7 +798,7 @@ static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
 		return PTR_ERR(q);
 	q->qid = qid;
-	phy->dev->q_tx[qid] = q;
+	phy->q_tx[qid] = q;
 	return 0;
 }
@@ -938,7 +939,7 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
 void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
 	     struct mt76_wcid *wcid, struct sk_buff *skb);
 void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
 			 bool send_bar);
 void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
 void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
...
@@ -24,14 +24,14 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
 	if (!skb)
 		return;
-	mt76_tx_queue_skb(dev, mdev->q_tx[MT_TXQ_BEACON], skb,
+	mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], skb,
 			  &mvif->sta.wcid, NULL);
 	spin_lock_bh(&dev->ps_lock);
 	mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
 		FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
 		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
-			   mdev->q_tx[MT_TXQ_CAB]->hw_idx) |
+			   dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
 		FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
 		FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
@@ -81,7 +81,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
 	data.dev = dev;
 	__skb_queue_head_init(&data.q);
-	q = mdev->q_tx[MT_TXQ_BEACON];
+	q = dev->mphy.q_tx[MT_TXQ_BEACON];
 	spin_lock_bh(&q->lock);
 	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
 		IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -92,13 +92,13 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
 	/* Flush all previous CAB queue packets */
 	mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
-	mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_CAB], false);
+	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
 	mt76_csa_check(mdev);
 	if (mdev->csa_complete)
 		goto out;
-	q = mdev->q_tx[MT_TXQ_CAB];
+	q = dev->mphy.q_tx[MT_TXQ_CAB];
 	do {
 		nframes = skb_queue_len(&data.q);
 		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
@@ -123,8 +123,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
 		struct ieee80211_vif *vif = info->control.vif;
 		struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
-		mt76_tx_queue_skb(dev, mdev->q_tx[MT_TXQ_CAB], skb,
-				  &mvif->sta.wcid, NULL);
+		mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
 	}
 	mt76_queue_kick(dev, q);
 	spin_unlock_bh(&q->lock);
@@ -139,8 +138,8 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
 		 ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
 out:
-	mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_BEACON], false);
-	if (mdev->q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+	if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
 		dev->beacon_check++;
 }
...
@@ -133,14 +133,14 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
 	for (i = MT_TXQ_PSD; i >= 0; i--)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
 	if (napi_complete_done(napi, 0))
 		mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
 	for (i = MT_TXQ_PSD; i >= 0; i--)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
 	mt7603_mac_sta_poll(dev);
...
@@ -445,7 +445,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
 		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
 		for (i = 0; i < 4; i++) {
-			struct mt76_queue *q = dev->mt76.q_tx[i];
+			struct mt76_queue *q = dev->mphy.q_tx[i];
 			u8 qidx = q->hw_idx;
 			u8 tid = ac_to_tid[i];
 			u32 txtime = airtime[qidx];
@@ -896,7 +896,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
 	struct ieee80211_vif *vif = info->control.vif;
-	struct mt76_queue *q = dev->mt76.q_tx[qid];
+	struct mt76_queue *q = dev->mphy.q_tx[qid];
 	struct mt7603_vif *mvif;
 	int wlan_idx;
 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
@@ -1436,7 +1436,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
 	for (i = 0; i < __MT_TXQ_MAX; i++)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
 	mt76_for_each_q_rx(&dev->mt76, i) {
 		mt76_queue_rx_reset(dev, i);
@@ -1515,7 +1515,7 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
 	int i;
 	for (i = 0; i < 4; i++) {
-		q = dev->mt76.q_tx[i];
+		q = dev->mphy.q_tx[i];
 		if (!q->queued)
 			continue;
...
@@ -383,7 +383,7 @@ mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
 	while ((skb = __skb_dequeue(list)) != NULL) {
 		int qid = skb_get_queue_mapping(skb);
-		mt76_tx_queue_skb_raw(dev, dev->mt76.q_tx[qid], skb, 0);
+		mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[qid], skb, 0);
 	}
 }
@@ -394,7 +394,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
 	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
 	struct sk_buff_head list;
-	mt76_stop_tx_queues(&dev->mt76, sta, true);
+	mt76_stop_tx_queues(&dev->mphy, sta, true);
 	mt7603_wtbl_set_ps(dev, msta, ps);
 	if (ps)
 		return;
@@ -514,7 +514,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
 	u16 cw_max = (1 << 10) - 1;
 	u32 val;
-	queue = dev->mt76.q_tx[queue]->hw_idx;
+	queue = dev->mphy.q_tx[queue]->hw_idx;
 	if (params->cw_min)
 		cw_min = params->cw_min;
...
@@ -187,7 +187,7 @@ mt7615_reset_test_set(void *data, u64 val)
 	skb_put(skb, 1);
 	mt7615_mutex_acquire(dev);
-	mt76_tx_queue_skb_raw(dev, dev->mt76.q_tx[0], skb, 0);
+	mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[0], skb, 0);
 	mt7615_mutex_release(dev);
 	return 0;
@@ -336,7 +336,7 @@ mt7615_queues_read(struct seq_file *s, void *data)
 		struct mt76_queue *q;
 		char *queue;
 	} queue_map[] = {
-		{ dev->mt76.q_tx[MT_TXQ_BE], "PDMA0" },
+		{ dev->mphy.q_tx[MT_TXQ_BE], "PDMA0" },
 		{ dev->mt76.q_mcu[MT_MCUQ_WM], "MCUQ" },
 		{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
 	};
...
@@ -60,7 +60,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
 		return ret;
 	for (i = 1; i <= MT_TXQ_PSD ; i++)
-		dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
+		dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
 	return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU,
 				   MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
...
@@ -385,7 +385,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
 {
 	struct mt7615_phy *phy = mt7615_ext_phy(dev);
 	struct mt76_phy *mphy;
-	int ret;
+	int i, ret;
 	if (!is_mt7615(&dev->mt76))
 		return -EOPNOTSUPP;
@@ -429,6 +429,10 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
 	mphy->sband_2g.sband.n_channels = 0;
 	mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+	/* mt7615 second phy shares the same hw queues with the primary one */
+	for (i = 0; i <= MT_TXQ_PSD ; i++)
+		mphy->q_tx[i] = dev->mphy.q_tx[i];
 	ret = mt76_register_phy(mphy);
 	if (ret)
 		ieee80211_free_hw(mphy->hw);
...
@@ -1435,12 +1435,12 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
 	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
 	u8 i, count;
-	mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[MT_TXQ_PSD], false);
+	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
 	if (is_mt7615(&dev->mt76)) {
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[MT_TXQ_BE], false);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
 	} else {
 		for (i = 0; i < IEEE80211_NUM_ACS; i++)
-			mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+			mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
 	}
 	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
@@ -2046,7 +2046,7 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
 	for (i = 0; i < __MT_TXQ_MAX; i++)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
 	mt76_for_each_q_rx(&dev->mt76, i) {
 		mt76_queue_rx_reset(dev, i);
...
@@ -287,7 +287,7 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
 	/* tx */
 	for (i = 0; i <= MT_TXQ_PSD; i++) {
-		ret = mt7663s_tx_run_queue(dev, dev->q_tx[i]);
+		ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
 		if (ret > 0)
 			nframes += ret;
 	}
...
@@ -15,7 +15,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
 {
 	struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
 	struct mt76_dev *mdev = &dev->mt76;
-	struct mt76_queue *q = mdev->q_tx[MT_TXQ_PSD];
+	struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
 	struct beacon_bc_data data = {};
 	struct sk_buff *skb;
 	int i;
@@ -59,8 +59,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
 		struct ieee80211_vif *vif = info->control.vif;
 		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
-		mt76_tx_queue_skb(dev, mdev->q_tx[MT_TXQ_PSD], skb,
-				  &mvif->group_wcid, NULL);
+		mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
 	}
 	spin_unlock_bh(&q->lock);
 }
@@ -151,14 +150,14 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
 	for (i = MT_TXQ_PSD; i >= 0; i--)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
 	if (napi_complete_done(napi, 0))
 		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
 	for (i = MT_TXQ_PSD; i >= 0; i--)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
 	mt76_worker_schedule(&dev->mt76.tx_worker);
@@ -282,7 +281,7 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
 		if (dev->mt76.csa_complete)
 			mt76_csa_finish(&dev->mt76);
 		else
-			mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
+			mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
 	}
 	if (intr & MT_INT_TX_STAT)
@@ -347,7 +346,7 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
 	int i;
 	for (i = 0; i < 4; i++) {
-		q = dev->mt76.q_tx[i];
+		q = dev->mphy.q_tx[i];
 		if (!q->queued)
 			continue;
@@ -467,7 +466,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
 	for (i = 0; i < __MT_TXQ_MAX; i++)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
 	mt76_for_each_q_rx(&dev->mt76, i) {
 		mt76_queue_rx_reset(dev, i);
...
@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 			    struct mt76_tx_info *tx_info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
-	int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
+	int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
 	struct mt76x02_txwi *txwi;
 	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
 	enum mt76_qsel qsel;
...
@@ -488,7 +488,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	u8 cw_min = 5, cw_max = 10, qid;
 	u32 val;
-	qid = dev->mt76.q_tx[queue]->hw_idx;
+	qid = dev->mphy.q_tx[queue]->hw_idx;
 	if (params->cw_min)
 		cw_min = fls(params->cw_min);
@@ -622,7 +622,7 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
 	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
 	int idx = msta->wcid.idx;
-	mt76_stop_tx_queues(&dev->mt76, sta, true);
+	mt76_stop_tx_queues(&dev->mphy, sta, true);
 	if (mt76_is_mmio(mdev))
 		mt76x02_mac_wcid_set_drop(dev, idx, ps);
 }
...
@@ -284,7 +284,7 @@ mt7915_queues_read(struct seq_file *s, void *data)
 		struct mt76_queue *q;
 		char *queue;
 	} queue_map[] = {
-		{ dev->mt76.q_tx[MT_TXQ_BE], "WFDMA0" },
+		{ dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
 		{ dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" },
 		{ dev->mt76.q_mcu[MT_MCUQ_WA], "MCUWA" },
 		{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
...
@@ -6,16 +6,16 @@
 #include "mac.h"
 static int
-mt7915_init_tx_queues(struct mt7915_dev *dev, int idx, int n_desc)
+mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
 {
 	int i, err;
-	err = mt76_init_tx_queue(&dev->mphy, 0, idx, n_desc, MT_TX_RING_BASE);
+	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
 	if (err < 0)
 		return err;
 	for (i = 0; i <= MT_TXQ_PSD; i++)
-		dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
+		phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
 	return 0;
 }
@@ -237,7 +237,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
 	mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
 	/* init tx queue */
-	ret = mt7915_init_tx_queues(dev, MT7915_TXQ_BAND0,
+	ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
 				    MT7915_TX_RING_SIZE);
 	if (ret)
 		return ret;
...
@@ -1072,8 +1072,8 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
 	u8 i, count;
 	/* clean DMA queues and unmap buffers first */
-	mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_PSD], false);
-	mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_BE], false);
+	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
 	/*
 	 * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
@@ -1410,8 +1410,9 @@ mt7915_update_beacons(struct mt7915_dev *dev)
 }
 static void
-mt7915_dma_reset(struct mt7915_dev *dev)
+mt7915_dma_reset(struct mt7915_phy *phy)
 {
+	struct mt7915_dev *dev = phy->dev;
 	int i;
 	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
@@ -1422,7 +1423,7 @@ mt7915_dma_reset(struct mt7915_dev *dev)
 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
 	for (i = 0; i < __MT_TXQ_MAX; i++)
-		mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+		mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
 	mt76_for_each_q_rx(&dev->mt76, i) {
 		mt76_queue_rx_reset(dev, i);
@@ -1478,7 +1479,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
 	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
 	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
-		mt7915_dma_reset(dev);
+		mt7915_dma_reset(&dev->phy);
 		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
 		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
...
@@ -67,7 +67,7 @@ static int mt76s_alloc_tx(struct mt76_dev *dev)
 			return PTR_ERR(q);
 		q->qid = i;
-		dev->q_tx[i] = q;
+		dev->phy.q_tx[i] = q;
 	}
 	q = mt76s_alloc_tx_queue(dev);
@@ -206,7 +206,8 @@ static void mt76s_status_worker(struct mt76_worker *w)
 	nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
 	for (i = 0; i <= MT_TXQ_PSD; i++)
-		nframes += mt76s_process_tx_queue(dev, dev->q_tx[i]);
+		nframes += mt76s_process_tx_queue(dev,
+						  dev->phy.q_tx[i]);
 	if (dev->drv->tx_status_data &&
 	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
...
@@ -23,6 +23,7 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev)
 {
 	struct mt76_testmode_data *td = &dev->test;
 	struct mt76_wcid *wcid = &dev->global_wcid;
+	struct mt76_phy *phy = &dev->phy;
 	struct sk_buff *skb = td->tx_skb;
 	struct mt76_queue *q;
 	int qid;
@@ -31,7 +32,7 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev)
 		return;
 	qid = skb_get_queue_mapping(skb);
-	q = dev->q_tx[qid];
+	q = phy->q_tx[qid];
 	spin_lock_bh(&q->lock);
...
@@ -230,8 +230,8 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
 		    bool *stop)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_queue *q = phy->q_tx[qid];
 	struct mt76_dev *dev = phy->dev;
-	struct mt76_queue *q = dev->q_tx[qid];
 	bool non_aql;
 	int pending;
 	int idx;
@@ -286,7 +286,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	if (ext_phy)
 		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
-	q = dev->q_tx[qid];
+	q = phy->q_tx[qid];
 	spin_lock_bh(&q->lock);
 	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
@@ -345,7 +345,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
 	struct mt76_phy *phy = hw->priv;
 	struct mt76_dev *dev = phy->dev;
 	struct sk_buff *last_skb = NULL;
-	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
+	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
 	int i;
 	spin_lock_bh(&hwq->lock);
@@ -449,8 +449,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 static int
 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
-	struct mt76_dev *dev = phy->dev;
-	struct mt76_queue *q = dev->q_tx[qid];
+	struct mt76_queue *q = phy->q_tx[qid];
 	struct ieee80211_txq *txq;
 	struct mt76_txq *mtxq;
 	struct mt76_wcid *wcid;
@@ -539,7 +538,7 @@ void mt76_tx_worker(struct mt76_worker *w)
 #endif
 }
-void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
 			 bool send_bar)
 {
 	int i;
@@ -552,7 +551,7 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 		if (!txq)
 			continue;
-		hwq = dev->q_tx[mt76_txq_get_qid(txq)];
+		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
 		mtxq = (struct mt76_txq *)txq->drv_priv;
 		spin_lock_bh(&hwq->lock);
...
@@ -815,7 +815,7 @@ static void mt76u_status_worker(struct mt76_worker *w)
 	int i;
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		q = dev->q_tx[i];
+		q = dev->phy.q_tx[i];
 		while (q->queued > 0) {
 			if (!q->entry[q->tail].done)
@@ -983,7 +983,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
 	for (i = 0; i <= MT_TXQ_PSD; i++) {
 		if (i >= IEEE80211_NUM_ACS) {
-			dev->q_tx[i] = dev->q_tx[0];
+			dev->phy.q_tx[i] = dev->phy.q_tx[0];
 			continue;
 		}
@@ -995,7 +995,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
 		q->hw_idx = mt76u_ac_to_hwq(dev, i);
 		q->qid = i;
-		dev->q_tx[i] = q;
+		dev->phy.q_tx[i] = q;
 		q->entry = devm_kcalloc(dev->dev,
 					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -1024,7 +1024,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
 		struct mt76_queue *q;
 		int j;
-		q = dev->q_tx[i];
+		q = dev->phy.q_tx[i];
 		if (!q)
 			continue;
@@ -1052,7 +1052,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
 		dev_err(dev->dev, "timed out waiting for pending tx\n");
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		q = dev->q_tx[i];
+		q = dev->phy.q_tx[i];
 		if (!q)
 			continue;
@@ -1064,7 +1064,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
 	 * will fail to submit urb, cleanup those skb's manually.
 	 */
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		q = dev->q_tx[i];
+		q = dev->phy.q_tx[i];
 		if (!q)
 			continue;
...