Commit cfbc6c4c authored by Sara Sharon, committed by Luca Coelho

iwlwifi: mvm: support mac80211 TXQs model

Move to use the new mac80211 TXQs implementation. This has
quite a few benefits for us. We can get rid of the awkward
mapping of DQA to mac80211 queues. We can stop buffering
traffic while waiting for the queue to be allocated. We can
also use mac80211 AMSDUs instead of building them ourselves.

The usage is pretty simple:
Each ieee80211_txq contains an iwl_mvm_txq (see the sketch
below). There is such a queue for each TID, and one for
management frames. We keep the static AP queues for probes
and non-bufferable MMPDUs, along with the broadcast and
multicast queues. Those are still served by the "old" TX
invocation path - iwl_mvm_mac_tx.
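
For reference, the model hangs the driver's per-queue state off
mac80211's per-TXQ private area (hw->txq_data_size bytes behind
txq->drv_priv); a condensed view of the structures added in the
mvm.h hunk below:

    /* Per-TXQ driver state; mac80211 reserves room for it once
     * hw->txq_data_size = sizeof(struct iwl_mvm_txq) is set. */
    struct iwl_mvm_txq {
            struct list_head list;   /* entry in mvm->add_stream_txqs while
                                      * the HW queue is being allocated */
            u16 txq_id;              /* IWL_MVM_INVALID_QUEUE until allocated */
            spinlock_t tx_path_lock; /* serializes TX path invocation */
            bool stopped;            /* mirrors HW queue stop/wake state */
    };

    static inline struct iwl_mvm_txq *
    iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
    {
            return (void *)txq->drv_priv;
    }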

When there is a new frame on a TXQ, iwl_mvm_mac_wake_tx_queue
is called; it either invokes the TX path directly, or defers
the frame and schedules the queue for allocation if it does
not exist yet.
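
A condensed sketch of that flow (race handling elided here; the
mac80211.c hunk below documents the txq_id/list races the real
version must consider):

    static void wake_tx_queue(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
    {
            struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
            struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

            if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
                    /* queue exists: drain the TXQ via ieee80211_tx_dequeue() */
                    iwl_mvm_mac_itxq_xmit(hw, txq);
            } else if (list_empty(&mvmtxq->list)) {
                    /* first frame: let the add-stream worker allocate a queue */
                    list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
                    schedule_work(&mvm->add_stream_wk);
            }
    }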

Most of the TX path is left untouched, although we can consider
cleaning it up some more, for example getting rid of the
duplication of txq_id in both iwl_mvm_txq and
iwl_mvm_dqa_txq_info.
Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent c281f137
@@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 	file->private_data = inode->i_private;
 
-	ieee80211_stop_queues(mvm->hw);
 	synchronize_net();
 
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 	rtnl_unlock();
 	if (err > 0)
 		err = -EINVAL;
-	if (err) {
-		ieee80211_wake_queues(mvm->hw);
+	if (err)
 		return err;
-	}
 	mvm->d3_test_active = true;
 	mvm->keep_vif = NULL;
 	return 0;
@@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
 
-	ieee80211_wake_queues(mvm->hw);
-
 	return 0;
 }
...
@@ -295,7 +295,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	struct iwl_notification_wait alive_wait;
 	struct iwl_mvm_alive_data alive_data;
 	const struct fw_img *fw;
-	int ret, i;
+	int ret;
 	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
 	static const u16 alive_cmd[] = { MVM_ALIVE };
@@ -373,9 +373,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
 		BIT(IWL_MAX_TID_COUNT + 2);
 
-	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
-		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
-
 	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
...
@@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data {
 	bool found_vif;
 };
 
-struct iwl_mvm_hw_queues_iface_iterator_data {
-	struct ieee80211_vif *exclude_vif;
-	unsigned long used_hw_queues;
-};
-
 static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
 				    struct ieee80211_vif *vif)
 {
@@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
 		data->preferred_tsf = NUM_TSF_IDS;
 }
 
-/*
- * Get the mask of the queues used by the vif
- */
-u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
-{
-	u32 qmask = 0, ac;
-
-	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
-		return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-
-	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
-			qmask |= BIT(vif->hw_queue[ac]);
-	}
-
-	if (vif->type == NL80211_IFTYPE_AP ||
-	    vif->type == NL80211_IFTYPE_ADHOC)
-		qmask |= BIT(vif->cab_queue);
-
-	return qmask;
-}
-
-static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
-					 struct ieee80211_vif *vif)
-{
-	struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
-
-	/* exclude the given vif */
-	if (vif == data->exclude_vif)
-		return;
-
-	data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
-}
-
-unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
-					 struct ieee80211_vif *exclude_vif)
-{
-	struct iwl_mvm_hw_queues_iface_iterator_data data = {
-		.exclude_vif = exclude_vif,
-		.used_hw_queues =
-			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-			BIT(mvm->aux_queue) |
-			BIT(IWL_MVM_DQA_GCAST_QUEUE),
-	};
-
-	lockdep_assert_held(&mvm->mutex);
-
-	/* mark all VIF used hw queues */
-	ieee80211_iterate_active_interfaces_atomic(
-		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-		iwl_mvm_iface_hw_queues_iter, &data);
-
-	return data.used_hw_queues;
-}
-
 static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 				       struct ieee80211_vif *vif)
 {
@@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 		iwl_mvm_mac_iface_iterator, &data);
 
-	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
-
 	/*
 	 * In the case we're getting here during resume, it's similar to
 	 * firmware restart, and with RESUME_ALL the iterator will find
@@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	 * the ones here - no real limit
 	 */
 	queue_limit = IEEE80211_MAX_QUEUES;
-	BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
-		     BITS_PER_BYTE *
-		     sizeof(mvm->hw_queue_to_mac80211[0]));
 
 	/*
 	 * Find available queues, and allocate them to the ACs. When in
@@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		 * queue value (when queue is enabled).
 		 */
 		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
-		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
-	} else {
-		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	}
 
 	mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  exit_fail:
 	memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
-	memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
-	vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	return ret;
 }
@@ -1185,7 +1115,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
 	if (!fw_has_api(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_API_STA_TYPE))
-		ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+		ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
 
 	/*
 	 * Only set the beacon time when the MAC is being added, when we
...
@@ -425,7 +425,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	ieee80211_hw_set(hw, SIGNAL_DBM);
 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
-	ieee80211_hw_set(hw, QUEUE_CONTROL);
 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 	ieee80211_hw_set(hw, SUPPORTS_PS);
 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@@ -439,6 +438,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
 	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
 	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
 
 	if (iwl_mvm_has_tlc_offload(mvm)) {
 		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -549,6 +550,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
 	hw->chanctx_data_size = sizeof(u16);
+	hw->txq_data_size = sizeof(struct iwl_mvm_txq);
 
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -798,7 +800,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
 		goto out;
 
 	__skb_queue_tail(&mvm->d0i3_tx, skb);
-	ieee80211_stop_queues(mvm->hw);
 
 	/* trigger wakeup */
 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@@ -818,13 +819,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 	struct ieee80211_sta *sta = control->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
+	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+		IEEE80211_TX_CTL_TX_OFFCHAN;
 
 	if (iwl_mvm_is_radio_killed(mvm)) {
 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
 		goto drop;
 	}
 
-	if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+	if (offchannel &&
 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
 		goto drop;
@@ -837,8 +840,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 		sta = NULL;
 
 	/* If there is no sta, and it's not offchannel - send through AP */
-	if (info->control.vif->type == NL80211_IFTYPE_STATION &&
-	    info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
+	    !offchannel) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info->control.vif);
 		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@@ -866,6 +869,77 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 	ieee80211_free_txskb(hw, skb);
 }
 
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+	struct sk_buff *skb = NULL;
+
+	spin_lock(&mvmtxq->tx_path_lock);
+
+	rcu_read_lock();
+	while (likely(!mvmtxq->stopped &&
+		      (mvm->trans->system_pm_mode ==
+		       IWL_PLAT_PM_MODE_DISABLED))) {
+		skb = ieee80211_tx_dequeue(hw, txq);
+
+		if (!skb)
+			break;
+
+		if (!txq->sta)
+			iwl_mvm_tx_skb_non_sta(mvm, skb);
+		else
+			iwl_mvm_tx_skb(mvm, skb, txq->sta);
+	}
+	rcu_read_unlock();
+	spin_unlock(&mvmtxq->tx_path_lock);
+}
+
+static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+				      struct ieee80211_txq *txq)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+
+	/*
+	 * Please note that racing is handled very carefully here:
+	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
+	 * deleted afterwards.
+	 * This means that if:
+	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
+	 *	queue is allocated and we can TX.
+	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
+	 *	a race, should defer the frame.
+	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
+	 *	need to allocate the queue and defer the frame.
+	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
+	 *	queue is already scheduled for allocation, no need to allocate,
+	 *	should defer the frame.
+	 */
+
+	/* If the queue is allocated TX and return. */
+	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
+		/*
+		 * Check that list is empty to avoid a race where txq_id is
+		 * already updated, but the queue allocation work wasn't
+		 * finished
+		 */
+		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
+			return;
+
+		iwl_mvm_mac_itxq_xmit(hw, txq);
+		return;
+	}
+
+	/* The list is being deleted only after the queue is fully allocated. */
+	if (!list_empty(&mvmtxq->list))
+		return;
+
+	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+	schedule_work(&mvm->add_stream_wk);
+}
+
 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
 {
 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
@@ -1107,7 +1181,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
-	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -2883,32 +2956,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
 			peer_addr, action);
 }
 
-static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
-					     struct iwl_mvm_sta *mvm_sta)
-{
-	struct iwl_mvm_tid_data *tid_data;
-	struct sk_buff *skb;
-	int i;
-
-	spin_lock_bh(&mvm_sta->lock);
-	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
-		tid_data = &mvm_sta->tid_data[i];
-
-		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
-			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-			/*
-			 * The first deferred frame should've stopped the MAC
-			 * queues, so we should never get a second deferred
-			 * frame for the RA/TID.
-			 */
-			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
-			ieee80211_free_txskb(mvm->hw, skb);
-		}
-	}
-	spin_unlock_bh(&mvm_sta->lock);
-}
-
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -2942,7 +2989,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 	 */
 	if (old_state == IEEE80211_STA_NONE &&
 	    new_state == IEEE80211_STA_NOTEXIST) {
-		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
 		flush_work(&mvm->add_stream_wk);
 
 		/*
@@ -4680,6 +4726,7 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
 const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.tx = iwl_mvm_mac_tx,
+	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
 	.ampdu_action = iwl_mvm_mac_ampdu_action,
 	.start = iwl_mvm_mac_start,
 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
...
@@ -778,6 +778,40 @@ struct iwl_mvm_geo_profile {
 	u8 values[ACPI_GEO_TABLE_SIZE];
 };
 
+struct iwl_mvm_txq {
+	struct list_head list;
+	u16 txq_id;
+	/* Protects TX path invocation from two places */
+	spinlock_t tx_path_lock;
+	bool stopped;
+};
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+	return (void *)txq->drv_priv;
+}
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
+{
+	if (tid == IWL_MAX_TID_COUNT)
+		tid = IEEE80211_NUM_TIDS;
+
+	return (void *)sta->txq[tid]->drv_priv;
+}
+
+/**
+ * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
+ *
+ * @sta_id: sta id
+ * @txq_tid: txq tid
+ */
+struct iwl_mvm_tvqm_txq_info {
+	u8 sta_id;
+	u8 txq_tid;
+};
+
 struct iwl_mvm_dqa_txq_info {
 	u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
 	bool reserved; /* Is this the TXQ reserved for a STA */
@@ -843,13 +877,13 @@ struct iwl_mvm {
 		u64 on_time_scan;
 	} radio_stats, accu_radio_stats;
 
-	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
-
-	struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+	struct list_head add_stream_txqs;
+	union {
+		struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
+	};
 	struct work_struct add_stream_wk; /* To add streams to queues */
 
-	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
-
 	const char *nvm_file_name;
 	struct iwl_nvm_data *nvm_data;
 	/* NVM sections */
@@ -863,7 +897,6 @@ struct iwl_mvm {
 	/* data related to data path */
 	struct iwl_rx_phy_info last_phy_info;
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
-	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	u8 rx_ba_sessions;
 
 	/* configured by mac80211 */
@@ -1470,6 +1503,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			    struct ieee80211_tx_info *info,
 			    struct ieee80211_sta *sta, __le16 fc);
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+
 #ifdef CONFIG_IWLWIFI_DEBUG
 const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
@@ -1599,7 +1634,6 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     bool force_assoc_off, const u8 *bssid_override);
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
 void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
@@ -1615,8 +1649,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
 				 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
-unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
-					 struct ieee80211_vif *exclude_vif);
 void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
 				   struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -1906,10 +1938,6 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 	iwl_trans_stop_device(mvm->trans);
 }
 
-/* Stop/start all mac queues in a given bitmap */
-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
-
 /* Re-configure the SCD for a queue that has already been configured */
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 			 int tid, int frame_limit, u16 ssn);
...
@@ -685,6 +685,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+	INIT_LIST_HEAD(&mvm->add_stream_txqs);
 
 	spin_lock_init(&mvm->d0i3_tx_lock);
 	spin_lock_init(&mvm->refs_lock);
@@ -1079,24 +1080,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
 		iwl_mvm_rx_common(mvm, rxb, pkt);
 }
 
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
-{
-	int q;
-
-	if (WARN_ON_ONCE(!mq))
-		return;
-
-	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
-		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
-			IWL_DEBUG_TX_QUEUES(mvm,
-					    "mac80211 %d already stopped\n", q);
-			continue;
-		}
-
-		ieee80211_stop_queue(mvm->hw, q);
-	}
-}
-
 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 			     const struct iwl_device_cmd *cmd)
 {
@@ -1109,38 +1092,66 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 	iwl_trans_block_txq_ptrs(mvm->trans, false);
 }
 
-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+				       int hw_queue, bool start)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
-
-	iwl_mvm_stop_mac_queues(mvm, mq);
-}
-
-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
-{
-	int q;
-
-	if (WARN_ON_ONCE(!mq))
-		return;
+	struct ieee80211_sta *sta;
+	struct ieee80211_txq *txq;
+	struct iwl_mvm_txq *mvmtxq;
+	int i;
+	unsigned long tid_bitmap;
+	struct iwl_mvm_sta *mvmsta;
+	u8 sta_id;
+
+	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
+		mvm->tvqm_info[hw_queue].sta_id :
+		mvm->queue_info[hw_queue].ra_sta_id;
+
+	if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+		return;
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+	if (IS_ERR_OR_NULL(sta))
+		goto out;
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		int tid = mvm->tvqm_info[hw_queue].txq_tid;
+
+		tid_bitmap = BIT(tid);
+	} else {
+		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
+	}
 
-	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
-		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
-			IWL_DEBUG_TX_QUEUES(mvm,
-					    "mac80211 %d still stopped\n", q);
-			continue;
-		}
+	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		int tid = i;
+
+		if (tid == IWL_MAX_TID_COUNT)
+			tid = IEEE80211_NUM_TIDS;
 
-		ieee80211_wake_queue(mvm->hw, q);
+		txq = sta->txq[tid];
+		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+		mvmtxq->stopped = !start;
+
+		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
 	}
+
+out:
+	rcu_read_unlock();
 }
 
-static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
+	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
+}
 
-	iwl_mvm_start_mac_queues(mvm, mq);
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
 }
 
 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
...
@@ -297,7 +297,6 @@ enum iwl_mvm_agg_state {
 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
- * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *	This is basically (last acked packet++).
@@ -318,7 +317,6 @@ enum iwl_mvm_agg_state {
  *	tpt_meas_start
  */
 struct iwl_mvm_tid_data {
-	struct sk_buff_head deferred_tx_frames;
 	u16 seq_number;
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
@@ -427,8 +425,6 @@ struct iwl_mvm_sta {
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;
 
-	u16 deferred_traffic_tid_map;
-
 	u8 reserved_queue;
 
 	/* Temporary, until the new TLC will control the Tx protection */
...
@@ -602,11 +602,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
 }
 
 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
-				      struct ieee80211_tx_info *info, __le16 fc)
+				      struct ieee80211_tx_info *info,
+				      struct ieee80211_hdr *hdr)
 {
-	struct iwl_mvm_vif *mvmvif;
-
-	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
+	struct iwl_mvm_vif *mvmvif =
+		iwl_mvm_vif_from_mac80211(info->control.vif);
+	__le16 fc = hdr->frame_control;
 
 	switch (info->control.vif->type) {
 	case NL80211_IFTYPE_AP:
@@ -625,7 +626,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 		    (!ieee80211_is_bufferable_mmpdu(fc) ||
 		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
 			return mvm->probe_queue;
-		if (info->hw_queue == info->control.vif->cab_queue)
+
+		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
+		    is_multicast_ether_addr(hdr->addr1))
 			return mvmvif->cab_queue;
 
 		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@@ -634,8 +637,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 	case NL80211_IFTYPE_P2P_DEVICE:
 		if (ieee80211_is_mgmt(fc))
 			return mvm->p2p_dev_queue;
-		if (info->hw_queue == info->control.vif->cab_queue)
-			return mvmvif->cab_queue;
 
 		WARN_ON_ONCE(1);
 		return mvm->p2p_dev_queue;
@@ -713,6 +714,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	__le16 fc = hdr->frame_control;
+	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+		IEEE80211_TX_CTL_TX_OFFCHAN;
 	int queue = -1;
 
 	memcpy(&info, skb->cb, sizeof(info));
@@ -720,11 +723,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;
 
-	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
-			 (!info.control.vif ||
-			  info.hw_queue != info.control.vif->cab_queue)))
-		return -1;
-
 	if (info.control.vif) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -737,14 +735,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			else
 				sta_id = mvmvif->mcast_sta.sta_id;
 
-			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
-							   hdr->frame_control);
+			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
 		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 			queue = mvm->snif_queue;
 			sta_id = mvm->snif_sta.sta_id;
 		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
-			   info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
+			   offchannel) {
 			/*
 			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
 			 * that can be used in 2 different types of vifs, P2P &
@@ -758,8 +754,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		}
 	}
 
-	if (queue < 0)
+	if (queue < 0) {
+		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
 		return -1;
+	}
 
 	if (unlikely(ieee80211_is_probe_resp(fc)))
 		iwl_mvm_probe_resp_set_noa(mvm, skb);
@@ -1002,34 +1000,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 #endif
 
-static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
-				  struct iwl_mvm_sta *mvm_sta, u8 tid,
-				  struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	u8 mac_queue = info->hw_queue;
-	struct sk_buff_head *deferred_tx_frames;
-
-	lockdep_assert_held(&mvm_sta->lock);
-
-	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
-	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
-
-	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
-
-	skb_queue_tail(deferred_tx_frames, skb);
-
-	/*
-	 * The first deferred frame should've stopped the MAC queues, so we
-	 * should never get a second deferred frame for the RA/TID.
-	 * In case of GSO the first packet may have been split, so don't warn.
-	 */
-	if (skb_queue_len(deferred_tx_frames) == 1) {
-		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
-		schedule_work(&mvm->add_stream_wk);
-	}
-}
-
 /* Check if there are any timed-out TIDs on a given shared TXQ */
 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
 {
@@ -1088,7 +1058,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
-	u16 txq_id = info->hw_queue;
+	u16 txq_id;
 	bool is_ampdu = false;
 	int hdrlen;
@@ -1152,14 +1122,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
-	/* Check if TXQ needs to be allocated or re-activated */
-	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
-		iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
-
-		/*
-		 * The frame is now deferred, and the worker scheduled
-		 * will re-allocate it, so we can free it for now.
-		 */
+	if (WARN_ON_ONCE(txq_id == IWL_MVM_INVALID_QUEUE)) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
 		return 0;
...