Commit 4f44d326 authored by Jakub Kicinski

Merge tag 'wireless-2023-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless

Kalle Valo says:

====================
wireless fixes for v6.3

Third set of fixes for v6.3. mt76 has two kernel crash fixes and
adds back 160 MHz channel support for mt7915. mac80211 has fixes
for a race in the transmit path and two mesh-related fixes. iwlwifi
also has fixes for races.

* tag 'wireless-2023-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless:
  wifi: mac80211: fix mesh path discovery based on unicast packets
  wifi: mac80211: fix qos on mesh interfaces
  wifi: iwlwifi: mvm: protect TXQ list manipulation
  wifi: iwlwifi: mvm: fix mvmtxq->stopped handling
  wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue()
  wifi: mwifiex: mark OF related data as maybe unused
  wifi: mt76: connac: do not check WED status for non-mmio devices
  wifi: mt76: mt7915: add back 160MHz channel width support for MT7915
  wifi: mt76: do not run mt76_unregister_device() on unregistered hw
====================

Link: https://lore.kernel.org/r/20230323110332.C4FE4C433D2@smtp.kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents b1de5c78 f355f701
...
@@ -732,7 +732,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 	rcu_read_lock();
 	do {
-		while (likely(!mvmtxq->stopped &&
+		while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
+					&mvmtxq->state) &&
+			      !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
+					&mvmtxq->state) &&
 			      !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
 			skb = ieee80211_tx_dequeue(hw, txq);
@@ -757,42 +760,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
 
-	/*
-	 * Please note that racing is handled very carefully here:
-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-	 * deleted afterwards.
-	 * This means that if:
-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	queue is allocated and we can TX.
-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	a race, should defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	need to allocate the queue and defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	queue is already scheduled for allocation, no need to allocate,
-	 *	should defer the frame.
-	 */
-
-	/* If the queue is allocated TX and return. */
-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-		/*
-		 * Check that list is empty to avoid a race where txq_id is
-		 * already updated, but the queue allocation work wasn't
-		 * finished
-		 */
-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-			return;
-
+	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+	    !txq->sta) {
 		iwl_mvm_mac_itxq_xmit(hw, txq);
 		return;
 	}
 
-	/* The list is being deleted only after the queue is fully allocated. */
-	if (!list_empty(&mvmtxq->list))
-		return;
-
-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-	schedule_work(&mvm->add_stream_wk);
+	/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+	 * to handle any packets we leave on the txq now
+	 */
+
+	spin_lock_bh(&mvm->add_stream_lock);
+	/* The list is being deleted only after the queue is fully allocated. */
+	if (list_empty(&mvmtxq->list) &&
+	    /* recheck under lock */
+	    !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+		schedule_work(&mvm->add_stream_wk);
+	}
+	spin_unlock_bh(&mvm->add_stream_lock);
 }
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)
...
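The new locking above follows a check-then-recheck pattern: a lock-free fast path on the READY bit, plus a locked slow path that re-tests both the list and the bit, so a queue that became ready while the caller waited on the lock is not queued for allocation twice. Below is a compilable pthreads sketch of the same pattern; all names are invented for illustration, this is not the driver code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t add_stream_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool ready;		/* queue fully allocated */
static bool on_pending_list;		/* queued for the alloc worker */

static void transmit(void) { puts("tx"); }
static void defer_to_worker(void) { on_pending_list = true; }

static void wake_tx_queue(void)
{
	if (atomic_load(&ready)) {	/* fast path, no lock taken */
		transmit();
		return;
	}

	pthread_mutex_lock(&add_stream_lock);
	/* recheck under lock: the worker may have marked the queue
	 * ready after the unlocked test above
	 */
	if (!on_pending_list && !atomic_load(&ready))
		defer_to_worker();
	pthread_mutex_unlock(&add_stream_lock);
}

int main(void)
{
	wake_tx_queue();		/* defers: queue not ready yet */
	atomic_store(&ready, true);	/* worker finishes allocation */
	wake_tx_queue();		/* fast path transmits */
	return 0;
}

The design point is that the fast path stays lock-free for every packet after setup, while the rare allocation path pays for the lock and the recheck.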
@@ -729,7 +729,10 @@ struct iwl_mvm_txq {
 	struct list_head list;
 	u16 txq_id;
 	atomic_t tx_request;
-	bool stopped;
+#define IWL_MVM_TXQ_STATE_STOP_FULL	0
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT	1
+#define IWL_MVM_TXQ_STATE_READY		2
+	unsigned long state;
 };
 
 static inline struct iwl_mvm_txq *
@@ -827,6 +830,7 @@ struct iwl_mvm {
 		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
 	};
 	struct work_struct add_stream_wk; /* To add streams to queues */
+	spinlock_t add_stream_lock;
 
 	const char *nvm_file_name;
 	struct iwl_nvm_data *nvm_data;
...
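The `state` bitmask above replaces the single `stopped` bool for a reason: with one flag, a queue stopped for two reasons at once (hardware queue full and a redirect in progress) was restarted as soon as either path cleared it. A minimal user-space sketch of that failure mode, with invented names standing in for the driver's test_bit()/set_bit() calls:

#include <stdio.h>

#define STATE_STOP_FULL     (1UL << 0)	/* stopped: hw queue full */
#define STATE_STOP_REDIRECT (1UL << 1)	/* stopped: redirect running */

static unsigned long state;

static int can_tx(void)
{
	/* TX may resume only when no stop reason remains */
	return !(state & (STATE_STOP_FULL | STATE_STOP_REDIRECT));
}

int main(void)
{
	state |= STATE_STOP_FULL;		/* queue fills up */
	state |= STATE_STOP_REDIRECT;		/* redirect starts */
	state &= ~STATE_STOP_FULL;		/* queue drains again... */
	printf("can_tx: %d\n", can_tx());	/* 0: redirect still holds */
	state &= ~STATE_STOP_REDIRECT;
	printf("can_tx: %d\n", can_tx());	/* 1: both reasons cleared */
	return 0;
}

A single-flag version would report 1 after the first clear, which is exactly the premature restart the hunks above eliminate.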
@@ -1195,6 +1195,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
+	spin_lock_init(&mvm->add_stream_lock);
 
 	init_waitqueue_head(&mvm->rx_sync_waitq);
@@ -1691,7 +1692,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
 		txq = sta->txq[tid];
 		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-		mvmtxq->stopped = !start;
+		if (start)
+			clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+		else
+			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+
 		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
 			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
...
@@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		struct iwl_mvm_txq *mvmtxq =
 			iwl_mvm_txq_from_tid(sta, tid);
 
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_lock_bh(&mvm->add_stream_lock);
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 
 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 			disable_agg_tids |= BIT(tid);
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_lock_bh(&mvm->add_stream_lock);
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 
 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -693,7 +699,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
 	/* Stop the queue and wait for it to empty */
-	txq->stopped = true;
+	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
 
 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
 	if (ret) {
@@ -736,7 +742,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
 out:
 	/* Continue using the queue */
-	txq->stopped = false;
+	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
 
 	return ret;
 }
@@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 		 * a queue in the function itself.
 		 */
 		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+			spin_lock_bh(&mvm->add_stream_lock);
 			list_del_init(&mvmtxq->list);
+			spin_unlock_bh(&mvm->add_stream_lock);
 			continue;
 		}
 
-		list_del_init(&mvmtxq->list);
+		/* now we're ready, any remaining races/concurrency will be
+		 * handled in iwl_mvm_mac_itxq_xmit()
+		 */
+		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
 		local_bh_disable();
+		spin_lock(&mvm->add_stream_lock);
+		list_del_init(&mvmtxq->list);
+		spin_unlock(&mvm->add_stream_lock);
+
 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
 		local_bh_enable();
 	}
@@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 		struct iwl_mvm_txq *mvmtxq =
 			iwl_mvm_txq_from_mac80211(sta->txq[i]);
 
+		spin_lock_bh(&mvm->add_stream_lock);
 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 }
...
@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
 	.can_ext_scan = true,
 };
 
-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
 	{ .compatible = "pci11ab,2b42" },
 	{ .compatible = "pci1b4b,2b42" },
 	{ }
...
@@ -495,7 +495,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
 	{"EXTLAST", NULL, 0, 0xFE},
 };
 
-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
 	{ .compatible = "marvell,sd8787" },
 	{ .compatible = "marvell,sd8897" },
 	{ .compatible = "marvell,sd8978" },
...
@@ -539,6 +539,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
 	if (ret)
 		return ret;
 
+	set_bit(MT76_STATE_REGISTERED, &phy->state);
 	phy->dev->phys[phy->band_idx] = phy;
 
 	return 0;
@@ -549,6 +550,9 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 {
 	struct mt76_dev *dev = phy->dev;
 
+	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
+		return;
+
 	if (IS_ENABLED(CONFIG_MT76_LEDS))
 		mt76_led_cleanup(phy);
 	mt76_tx_status_check(dev, true);
@@ -719,6 +723,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 		return ret;
 
 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+	set_bit(MT76_STATE_REGISTERED, &phy->state);
 	sched_set_fifo_low(dev->tx_worker.task);
 
 	return 0;
@@ -729,6 +734,9 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;
 
+	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
+		return;
+
 	if (IS_ENABLED(CONFIG_MT76_LEDS))
 		mt76_led_cleanup(&dev->phy);
 	mt76_tx_status_check(dev, true);
...
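The crash fix above is the classic guard pattern for error paths: set a REGISTERED bit only once registration has fully succeeded, and make unregister a no-op otherwise, so a probe failure path that calls cleanup unconditionally cannot unregister hw that mac80211 never saw. A reduced sketch with illustrative names (not the mt76 code):

#include <stdbool.h>
#include <stdio.h>

struct phy {
	bool registered;
};

static int register_phy(struct phy *p)
{
	/* ... real code registers with mac80211 here ... */
	p->registered = true;	/* flag set only after success */
	return 0;
}

static void unregister_phy(struct phy *p)
{
	if (!p->registered)	/* the added early return */
		return;
	p->registered = false;
	puts("unregistered");	/* ... real teardown runs here ... */
}

int main(void)
{
	struct phy p = { 0 };

	unregister_phy(&p);	/* probe-error path: now a safe no-op */
	register_phy(&p);
	unregister_phy(&p);	/* normal teardown still works */
	return 0;
}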
@@ -402,6 +402,7 @@ struct mt76_tx_cb {
 enum {
 	MT76_STATE_INITIALIZED,
+	MT76_STATE_REGISTERED,
 	MT76_STATE_RUNNING,
 	MT76_STATE_MCU_RUNNING,
 	MT76_SCANNING,
...
@@ -1221,6 +1221,9 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
 int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
 {
+	if (!mt76_is_mmio(dev))
+		return 0;
+
 	if (!mtk_wed_device_active(&dev->mmio.wed))
 		return 0;
...
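The bus check has to come before the WED test because in mt76 the mmio state shares a union with the USB/SDIO state, so dereferencing dev->mmio.wed on a non-MMIO device reads another member's bytes. A small tagged-union sketch of the rule; the types and field names here are invented for illustration:

#include <stdio.h>

enum bus { BUS_MMIO, BUS_USB };

struct dev {
	enum bus bus;		/* tag that says which member is live */
	union {
		struct { int wed_active; } mmio;
		struct { int ep_count; } usb;
	};
};

static int wed_update(struct dev *d)
{
	if (d->bus != BUS_MMIO)	/* the added guard */
		return 0;
	return d->mmio.wed_active;
}

int main(void)
{
	struct dev d = { .bus = BUS_USB, .usb = { .ep_count = 3 } };

	/* prints 0; without the guard it would misread usb.ep_count */
	printf("%d\n", wed_update(&d));
	return 0;
}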
@@ -383,7 +383,6 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
 	ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
 	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
-	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 
 	hw->max_tx_fragments = 4;
 
@@ -396,6 +395,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
 	}
 
 	if (phy->mt76->cap.has_5ghz) {
+		struct ieee80211_sta_vht_cap *vht_cap;
+
+		vht_cap = &phy->mt76->sband_5g.sband.vht_cap;
 		phy->mt76->sband_5g.sband.ht_cap.cap |=
 			IEEE80211_HT_CAP_LDPC_CODING |
 			IEEE80211_HT_CAP_MAX_AMSDU;
@@ -403,19 +405,28 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
 			IEEE80211_HT_MPDU_DENSITY_4;
 
 		if (is_mt7915(&dev->mt76)) {
-			phy->mt76->sband_5g.sband.vht_cap.cap |=
+			vht_cap->cap |=
 				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
 				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+			if (!dev->dbdc_support)
+				vht_cap->cap |=
+					IEEE80211_VHT_CAP_SHORT_GI_160 |
+					IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+					FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
 		} else {
-			phy->mt76->sband_5g.sband.vht_cap.cap |=
+			vht_cap->cap |=
 				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
 				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 
 			/* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
-			phy->mt76->sband_5g.sband.vht_cap.cap |=
+			vht_cap->cap |=
 				IEEE80211_VHT_CAP_SHORT_GI_160 |
 				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
 		}
+
+		if (!is_mt7915(&dev->mt76) || !dev->dbdc_support)
+			ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 	}
 
 	mt76_set_stream_caps(phy->mt76, true);
@@ -841,9 +852,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
 	int sts = hweight8(phy->mt76->chainmask);
 	u8 c, sts_160 = sts;
 
-	/* mt7915 doesn't support bw160 */
-	if (is_mt7915(&dev->mt76))
-		sts_160 = 0;
+	/* Can do 1/2 of STS in 160Mhz mode for mt7915 */
+	if (is_mt7915(&dev->mt76)) {
+		if (!dev->dbdc_support)
+			sts_160 /= 2;
+		else
+			sts_160 = 0;
+	}
 
 #ifdef CONFIG_MAC80211_MESH
 	if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -944,10 +959,15 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
 	int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
 	u16 mcs_map = 0;
 	u16 mcs_map_160 = 0;
-	u8 nss_160 = nss;
+	u8 nss_160;
 
-	/* Can't do 160MHz with mt7915 */
-	if (is_mt7915(&dev->mt76))
+	if (!is_mt7915(&dev->mt76))
+		nss_160 = nss;
+	else if (!dev->dbdc_support)
+		/* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
+		nss_160 = nss / 2;
+	else
+		/* Can't do 160MHz with mt7915 dbdc */
 		nss_160 = 0;
 
 	for (i = 0; i < 8; i++) {
...
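The stream budget restored above follows simple arithmetic: a non-dbdc mt7915 advertises half of its spatial streams at 160 MHz, a dbdc one none, and newer chips the full count. A sketch of the computation, using __builtin_popcount() in place of the kernel's hweight8(); the mask values are illustrative:

#include <stdio.h>

static int nss_160(unsigned char antenna_mask, int is_mt7915, int dbdc)
{
	int nss = __builtin_popcount(antenna_mask);	/* active chains */

	if (!is_mt7915)
		return nss;		/* full 160 MHz stream count */
	return dbdc ? 0 : nss / 2;	/* half, or none with dbdc */
}

int main(void)
{
	printf("%d\n", nss_160(0xf, 1, 0));	/* 4 chains -> 2 streams */
	printf("%d\n", nss_160(0xf, 1, 1));	/* dbdc -> no 160 MHz */
	printf("%d\n", nss_160(0xf, 0, 0));	/* other chips -> 4 */
	return 0;
}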
@@ -1284,6 +1284,9 @@ struct ieee80211_local {
 	struct list_head active_txqs[IEEE80211_NUM_ACS];
 	u16 schedule_round[IEEE80211_NUM_ACS];
 
+	/* serializes ieee80211_handle_wake_tx_queue */
+	spinlock_t handle_wake_tx_queue_lock;
+
 	u16 airtime_flags;
 	u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
 	u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
...
@@ -802,6 +802,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 	local->aql_threshold = IEEE80211_AQL_THRESHOLD;
 	atomic_set(&local->aql_total_pending_airtime, 0);
 
+	spin_lock_init(&local->handle_wake_tx_queue_lock);
+
 	INIT_LIST_HEAD(&local->chanctx_list);
 	mutex_init(&local->chanctx_mtx);
...
@@ -2765,17 +2765,6 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 	    mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
 		return RX_DROP_MONITOR;
 
-	/* Frame has reached destination. Don't forward */
-	if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
-		goto rx_accept;
-
-	if (!ifmsh->mshcfg.dot11MeshForwarding) {
-		if (is_multicast_ether_addr(eth->h_dest))
-			goto rx_accept;
-
-		return RX_DROP_MONITOR;
-	}
-
 	/* forward packet */
 	if (sdata->crypto_tx_tailroom_needed_cnt)
 		tailroom = IEEE80211_ENCRYPT_TAILROOM;
@@ -2814,6 +2803,17 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 		rcu_read_unlock();
 	}
 
+	/* Frame has reached destination. Don't forward */
+	if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
+		goto rx_accept;
+
+	if (!ifmsh->mshcfg.dot11MeshForwarding) {
+		if (is_multicast_ether_addr(eth->h_dest))
+			goto rx_accept;
+
+		return RX_DROP_MONITOR;
+	}
+
 	skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
 
 	ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
...
@@ -314,6 +314,8 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
 	struct ieee80211_txq *queue;
 
+	spin_lock(&local->handle_wake_tx_queue_lock);
+
 	/* Use ieee80211_next_txq() for airtime fairness accounting */
 	ieee80211_txq_schedule_start(hw, txq->ac);
 	while ((queue = ieee80211_next_txq(hw, txq->ac))) {
@@ -321,6 +323,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
 		ieee80211_return_txq(hw, queue, false);
 	}
 	ieee80211_txq_schedule_end(hw, txq->ac);
+	spin_unlock(&local->handle_wake_tx_queue_lock);
 }
 EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
...
@@ -147,6 +147,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 			   struct sta_info *sta, struct sk_buff *skb)
 {
+	const struct ethhdr *eth = (void *)skb->data;
 	struct mac80211_qos_map *qos_map;
 	bool qos;
 
@@ -154,8 +155,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 		skb_get_hash(skb);
 
 	/* all mesh/ocb stations are required to support WME */
-	if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-		    sdata->vif.type == NL80211_IFTYPE_OCB))
+	if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
+	     !is_multicast_ether_addr(eth->h_dest)) ||
+	    (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
 		qos = true;
 	else if (sta)
 		qos = sta->sta.wme;
...
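The predicate change matters because on mesh interfaces the sta pointer is usually still NULL at queue-selection time (the next hop is resolved later in the TX path), so the old `sta && mesh` test never fired and mesh frames went out without QoS. A truth-table sketch of the two predicates, simplified to plain booleans for illustration:

#include <stdbool.h>
#include <stdio.h>

/* old: qos only when a sta is already known */
static bool old_qos(bool sta, bool mesh, bool mcast)
{
	(void)mcast;
	return sta && mesh;
}

/* new: unicast on mesh is qos regardless of sta */
static bool new_qos(bool sta, bool mesh, bool mcast)
{
	(void)sta;
	return mesh && !mcast;
}

int main(void)
{
	/* typical mesh unicast TX: next hop (sta) not resolved yet */
	printf("old=%d new=%d\n",
	       old_qos(false, true, false),
	       new_qos(false, true, false));	/* old=0 new=1 */
	return 0;
}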