Commit c8f54701 authored by Johannes Berg, committed by Luca Coelho

iwlwifi: mvm: remove non-DQA mode

All the firmware versions the driver supports enable DQA, and thus
the only way to get non-DQA mode is to modify the source. Remove
this mode to simplify the code.
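
For context, the cleanup has the same shape in every file touched: a capability
check with two code paths collapses into the DQA-only path. A minimal sketch of
the pattern (taken from the ops.c hunk below, not a complete function):

	/* before: command queue selection branched on the DQA capability */
	if (iwl_mvm_is_dqa_supported(mvm))
		trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	else
		trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;

	/* after: DQA is always on, so only the DQA command queue remains */
	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;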
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent d197358b
drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -62,12 +62,6 @@
 #ifndef __iwl_fw_api_txq_h__
 #define __iwl_fw_api_txq_h__
 
-/* Tx queue numbers for non-DQA mode */
-enum {
-	IWL_MVM_OFFCHANNEL_QUEUE = 8,
-	IWL_MVM_CMD_QUEUE = 9,
-};
-
 /*
  * DQA queue numbers
  *
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -331,10 +331,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	 */
 	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
-	if (iwl_mvm_is_dqa_supported(mvm))
-		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
-	else
-		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -1137,14 +1134,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	/* reset quota debouncing buffer - 0xff will yield invalid data */
 	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
 
-	/* Enable DQA-mode if required */
-	if (iwl_mvm_is_dqa_supported(mvm)) {
-		ret = iwl_mvm_send_dqa_cmd(mvm);
-		if (ret)
-			goto error;
-	} else {
-		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
-	}
+	ret = iwl_mvm_send_dqa_cmd(mvm);
+	if (ret)
+		goto error;
 
 	/* Add auxiliary station for scanning */
 	ret = iwl_mvm_add_aux_sta(mvm);
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -241,32 +241,17 @@ static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
 	data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
 }
 
-static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
-					   struct ieee80211_sta *sta)
-{
-	struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-	/* Mark the queues used by the sta */
-	data->used_hw_queues |= mvmsta->tfd_queue_msk;
-}
-
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *exclude_vif)
 {
-	u8 sta_id;
 	struct iwl_mvm_hw_queues_iface_iterator_data data = {
 		.exclude_vif = exclude_vif,
 		.used_hw_queues =
 			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-			BIT(mvm->aux_queue),
+			BIT(mvm->aux_queue) |
+			BIT(IWL_MVM_DQA_GCAST_QUEUE),
 	};
 
-	if (iwl_mvm_is_dqa_supported(mvm))
-		data.used_hw_queues |= BIT(IWL_MVM_DQA_GCAST_QUEUE);
-	else
-		data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
-
 	lockdep_assert_held(&mvm->mutex);
 
 	/* mark all VIF used hw queues */
@@ -274,26 +259,6 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
 			mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 			iwl_mvm_iface_hw_queues_iter, &data);
 
-	/*
-	 * for DQA, the hw_queue in mac80211 is never really used for
-	 * real traffic (only the few queue IDs covered above), so
-	 * we can reuse the real HW queue IDs the stations use
-	 */
-	if (iwl_mvm_is_dqa_supported(mvm))
-		return data.used_hw_queues;
-
-	/* don't assign the same hw queues as TDLS stations */
-	ieee80211_iterate_stations_atomic(mvm->hw,
-					  iwl_mvm_mac_sta_hw_queues_iter,
-					  &data);
-
-	/*
-	 * Some TDLS stations may be removed but are in the process of being
-	 * drained. Don't touch their queues.
-	 */
-	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
-		data.used_hw_queues |= mvm->tfd_drained[sta_id];
-
 	return data.used_hw_queues;
 }
@@ -344,8 +309,7 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 					       NUM_TSF_IDS);
 }
 
-static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
-					       struct ieee80211_vif *vif)
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_mac_iface_iterator_data data = {
@@ -361,6 +325,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 	int ret, i, queue_limit;
 	unsigned long used_hw_queues;
 
+	lockdep_assert_held(&mvm->mutex);
+
 	/*
 	 * Allocate a MAC ID and a TSF for this MAC, along with the queues
 	 * and other resources.
@@ -444,7 +410,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 		return 0;
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm)) {
-		/*
-		 * queues in mac80211 almost entirely independent of
-		 * the ones here - no real limit
-		 */
+	/*
+	 * queues in mac80211 almost entirely independent of
+	 * the ones here - no real limit
+	 */
@@ -453,10 +418,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
-		BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
-			     BITS_PER_BYTE *
-			     sizeof(mvm->hw_queue_to_mac80211[0]));
-	} else {
-		/* need to not use too many in this case */
-		queue_limit = mvm->first_agg_queue;
-	}
+	BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
+		     BITS_PER_BYTE *
+		     sizeof(mvm->hw_queue_to_mac80211[0]));
 
 	/*
	 * Find available queues, and allocate them to the ACs. When in
@@ -478,27 +439,12 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 
 	/* Allocate the CAB queue for softAP and GO interfaces */
 	if (vif->type == NL80211_IFTYPE_AP) {
-		u8 queue;
-
-		if (!iwl_mvm_is_dqa_supported(mvm)) {
-			queue = find_first_zero_bit(&used_hw_queues,
-						    mvm->first_agg_queue);
-
-			if (queue >= mvm->first_agg_queue) {
-				IWL_ERR(mvm, "Failed to allocate cab queue\n");
-				ret = -EIO;
-				goto exit_fail;
-			}
-		} else {
-			queue = IWL_MVM_DQA_GCAST_QUEUE;
-		}
-
 		/*
 		 * For TVQM this will be overwritten later with the FW assigned
 		 * queue value (when queue is enabled).
 		 */
-		mvmvif->cab_queue = queue;
-		vif->cab_queue = queue;
+		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
 	} else {
 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	}
@@ -519,78 +465,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 	return ret;
 }
 
-int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
-	unsigned int wdg_timeout =
-		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
-	u32 ac;
-	int ret;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
-	if (ret)
-		return ret;
-
-	/* If DQA is supported - queues will be enabled when needed */
-	if (iwl_mvm_is_dqa_supported(mvm))
-		return 0;
-
-	switch (vif->type) {
-	case NL80211_IFTYPE_P2P_DEVICE:
-		iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-				      IWL_MVM_OFFCHANNEL_QUEUE,
-				      IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
-		break;
-	case NL80211_IFTYPE_AP:
-		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
-				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
-		/* fall through */
-	default:
-		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
-					      vif->hw_queue[ac],
-					      iwl_mvm_ac_to_tx_fifo[ac], 0,
-					      wdg_timeout);
-		break;
-	}
-
-	return 0;
-}
-
-void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
-	int ac;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	/*
-	 * If DQA is supported - queues were already disabled, since in
-	 * DQA-mode the queues are a property of the STA and not of the
-	 * vif, and at this point the STA was already deleted
-	 */
-	if (iwl_mvm_is_dqa_supported(mvm))
-		return;
-
-	switch (vif->type) {
-	case NL80211_IFTYPE_P2P_DEVICE:
-		iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-				    IWL_MVM_OFFCHANNEL_QUEUE,
-				    IWL_MAX_TID_COUNT, 0);
-		break;
-	case NL80211_IFTYPE_AP:
-		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
-				    IWL_MAX_TID_COUNT, 0);
-		/* fall through */
-	default:
-		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
-					    vif->hw_queue[ac],
-					    IWL_MAX_TID_COUNT, 0);
-	}
-}
-
 static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
 			      struct ieee80211_vif *vif,
 			      enum nl80211_band band,
@@ -914,18 +788,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
 {
 	struct iwl_mac_ctx_cmd cmd = {};
 	u32 tfd_queue_msk = 0;
-	int ret, i;
+	int ret;
 
 	WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
 
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
-	if (!iwl_mvm_is_dqa_supported(mvm)) {
-		for (i = 0; i < IEEE80211_NUM_ACS; i++)
-			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
-				tfd_queue_msk |= BIT(vif->hw_queue[i]);
-	}
-
 	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
 				       MAC_FILTER_IN_CONTROL_AND_MGMT |
 				       MAC_FILTER_IN_BEACON |
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -464,9 +464,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	if (mvm->trans->max_skb_frags)
 		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
 
-	if (!iwl_mvm_is_dqa_supported(mvm))
-		hw->queues = mvm->first_agg_queue;
-	else
-		hw->queues = IEEE80211_MAX_QUEUES;
+	hw->queues = IEEE80211_MAX_QUEUES;
 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
 	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
@@ -1067,9 +1064,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
-	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
 	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
-	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -1372,7 +1367,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 			goto out_release;
 		}
 
-		if (iwl_mvm_is_dqa_supported(mvm)) {
 		/*
 		 * Only queue for this station is the mcast queue,
 		 * which shouldn't be in TFD mask anyway
@@ -1382,7 +1376,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 					       IWL_STA_MULTICAST);
 		if (ret)
 			goto out_release;
-		}
 
 		iwl_mvm_vif_dbgfs_register(mvm, vif);
 		goto out_unlock;
@@ -1456,8 +1449,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
  out_release:
 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
 		mvm->vif_count--;
 
-	iwl_mvm_mac_ctxt_release(mvm, vif);
-
  out_unlock:
 	mutex_unlock(&mvm->mutex);
@@ -1469,40 +1460,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
 					struct ieee80211_vif *vif)
 {
-	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
-
-	if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
-		/*
-		 * mac80211 first removes all the stations of the vif and
-		 * then removes the vif. When it removes a station it also
-		 * flushes the AMPDU session. So by now, all the AMPDU sessions
-		 * of all the stations of this vif are closed, and the queues
-		 * of these AMPDU sessions are properly closed.
-		 * We still need to take care of the shared queues of the vif.
-		 * Flush them here.
-		 * For DQA mode there is no need - broadcast and multicast
-		 * queues are flushed separately.
-		 */
-		mutex_lock(&mvm->mutex);
-		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
-		mutex_unlock(&mvm->mutex);
-
-		/*
-		 * There are transports that buffer a few frames in the host.
-		 * For these, the flush above isn't enough since while we were
-		 * flushing, the transport might have sent more frames to the
-		 * device. To solve this, wait here until the transport is
-		 * empty. Technically, this could have replaced the flush
-		 * above, but flush is much faster than draining. So flush
-		 * first, and drain to make sure we have no frames in the
-		 * transport anymore.
-		 * If a station still had frames on the shared queues, it is
-		 * already marked as draining, so to complete the draining, we
-		 * just need to wait until the transport is empty.
-		 */
-		iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
-	}
-
 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 		/*
 		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
@@ -1510,14 +1467,6 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
 		 * queue are sent in ROC session.
 		 */
 		flush_work(&mvm->roc_done_wk);
-	} else {
-		/*
-		 * By now, all the AC queues are empty. The AGG queues are
-		 * empty too. We already got all the Tx responses for all the
-		 * packets in the queues. The drain work can have been
-		 * triggered. Flush it.
-		 */
-		flush_work(&mvm->sta_drained_wk);
 	}
 }
@@ -1571,7 +1520,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 	iwl_mvm_mac_ctxt_remove(mvm, vif);
 
 out_release:
-	iwl_mvm_mac_ctxt_release(mvm, vif);
 	mutex_unlock(&mvm->mutex);
 }
@@ -2419,11 +2367,6 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-		if (!iwl_mvm_is_dqa_supported(mvm) &&
-		    tid_data->state != IWL_AGG_ON &&
-		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
-			continue;
-
 		if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;
switch (cmd) { switch (cmd) {
case STA_NOTIFY_SLEEP: case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
ieee80211_sta_set_buffered(sta, tid, true); ieee80211_sta_set_buffered(sta, tid, true);
@@ -2632,9 +2572,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
 		return -EINVAL;
 
-	/* if a STA is being removed, reuse its ID */
-	flush_work(&mvm->sta_drained_wk);
-
 	/*
 	 * If we are in a STA removal flow and in DQA mode:
 	 *
@@ -2649,8 +2586,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 	 * make sure the worker is no longer handling frames for this STA.
 	 */
 	if (old_state == IEEE80211_STA_NONE &&
-	    new_state == IEEE80211_STA_NOTEXIST &&
-	    iwl_mvm_is_dqa_supported(mvm)) {
+	    new_state == IEEE80211_STA_NOTEXIST) {
 		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
 		flush_work(&mvm->add_stream_wk);
@@ -4032,7 +3968,6 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 		return;
 
 	/* Make sure we're done with the deferred traffic before flushing */
-	if (iwl_mvm_is_dqa_supported(mvm))
-		flush_work(&mvm->add_stream_wk);
+	flush_work(&mvm->add_stream_wk);
 
 	mutex_lock(&mvm->mutex);
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -121,6 +121,9 @@
  */
 #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
 
+/* offchannel queue towards mac80211 */
+#define IWL_MVM_OFFCHANNEL_QUEUE 0
+
 extern const struct ieee80211_ops iwl_mvm_hw_ops;
 
 /**
@@ -783,11 +786,7 @@ struct iwl_mvm {
 	/* data related to data path */
 	struct iwl_rx_phy_info last_phy_info;
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
-	struct work_struct sta_drained_wk;
 	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
-	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
-	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
-	u32 tfd_drained[IWL_MVM_STATION_COUNT];
 	u8 rx_ba_sessions;
 
 	/* configured by mac80211 */
@@ -960,9 +959,6 @@ struct iwl_mvm {
 	u16 probe_queue;
 	u16 p2p_dev_queue;
 
-	u8 first_agg_queue;
-	u8 last_agg_queue;
-
 	/* Indicate if device power save is allowed */
 	u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
 	unsigned int max_amsdu_len; /* used for debugfs only */
@@ -1125,12 +1121,6 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
 			    IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
-static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
-{
-	return fw_has_capa(&mvm->fw->ucode_capa,
-			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
-}
-
 static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
 {
 	/* For now we only use this mode to differentiate between
@@ -1469,7 +1459,6 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
 
 /* MAC (virtual interface) programming */
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     bool force_assoc_off, const u8 *bssid_override);
@@ -1720,10 +1709,6 @@ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
 			    u8 sta_id, u8 tid, unsigned int timeout);
 
-/*
- * Disable a TXQ.
- * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
- */
 int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			u8 tid, u8 flags);
 int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
@@ -1733,25 +1718,8 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
-	u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
-		IWL_MVM_CMD_QUEUE;
-
 	return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-		~BIT(cmd_queue));
-}
-
-static inline
-void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			   u8 fifo, u16 ssn, unsigned int wdg_timeout)
-{
-	struct iwl_trans_txq_scd_cfg cfg = {
-		.fifo = fifo,
-		.tid = IWL_MAX_TID_COUNT,
-		.aggregate = false,
-		.frame_limit = IWL_FRAME_LIMIT,
-	};
-
-	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
+		~BIT(IWL_MVM_DQA_CMD_QUEUE));
 }
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -623,27 +623,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
 
-	if (!iwl_mvm_is_dqa_supported(mvm)) {
-		mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
-
-		if (mvm->cfg->base_params->num_of_queues == 16) {
-			mvm->aux_queue = 11;
-			mvm->first_agg_queue = 12;
-			BUILD_BUG_ON(BITS_PER_BYTE *
-				     sizeof(mvm->hw_queue_to_mac80211[0]) < 12);
-		} else {
-			mvm->aux_queue = 15;
-			mvm->first_agg_queue = 16;
-			BUILD_BUG_ON(BITS_PER_BYTE *
-				     sizeof(mvm->hw_queue_to_mac80211[0]) < 16);
-		}
-	} else {
-		mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
-		mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
-		mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
-		mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
-		mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
-	}
+	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+
 	mvm->sf_state = SF_UNINIT;
 	if (iwl_mvm_has_unified_ucode(mvm))
 		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
@@ -662,7 +645,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
-	INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
@@ -714,10 +696,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	trans_cfg.command_groups = iwl_mvm_groups;
 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
 
-	if (iwl_mvm_is_dqa_supported(mvm))
-		trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
-	else
-		trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
 	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
 	trans_cfg.scd_set_active = true;
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -222,16 +222,7 @@ struct iwl_mvm_vif;
  * we remove the STA of the AP. The flush can be done synchronously against the
  * fw.
  * Drain means that the fw will drop all the frames sent to a specific station.
- * This is useful when a client (if we are IBSS / GO or AP) disassociates. In
- * that case, we need to drain all the frames for that client from the AC queues
- * that are shared with the other clients. Only then, we can remove the STA in
- * the fw. In order to do so, we track the non-AMPDU packets for each station.
- * If mac80211 removes a STA and if it still has non-AMPDU packets pending in
- * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
- * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
- * (we know about it with its Tx response), we remove the station in fw and set
- * it as %NULL in %fw_id_to_mac_id: this is the purpose of
- * %iwl_mvm_sta_drained_wk.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates.
  */
 
/**
@@ -371,7 +362,6 @@ struct iwl_mvm_rxq_dup_data {
  * struct iwl_mvm_sta - representation of a station in the driver
  * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
  * @tfd_queue_msk: the tfd queues used by the station
- * @hw_queue: per-AC mapping of the TFD queues used by station
  * @mac_id_n_color: the MAC context this station is linked to
  * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
  *	tid.
@@ -409,7 +399,6 @@ struct iwl_mvm_rxq_dup_data {
 struct iwl_mvm_sta {
 	u32 sta_id;
 	u32 tfd_queue_msk;
-	u8 hw_queue[IEEE80211_NUM_ACS];
 	u32 mac_id_n_color;
 	u16 tid_disable_agg;
 	u8 max_agg_bufsize;
@@ -548,7 +537,6 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
 
-void iwl_mvm_sta_drained_wk(struct work_struct *wk);
 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
 				struct ieee80211_sta *sta);
 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -129,10 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 	 * issue as it will have to complete before the next command is
 	 * executed, and a new time event means a new command.
 	 */
-	if (iwl_mvm_is_dqa_supported(mvm))
-		iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
-	else
-		iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
+	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -552,9 +552,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_vif *mvmvif;
 
-	if (!iwl_mvm_is_dqa_supported(mvm))
-		return info->hw_queue;
-
 	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
 
 	switch (info->control.vif->type) {
@@ -653,8 +650,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			if (ap_sta_id != IWL_MVM_INVALID_STA)
 				sta_id = ap_sta_id;
-		} else if (iwl_mvm_is_dqa_supported(mvm) &&
-			   info.control.vif->type == NL80211_IFTYPE_MONITOR) {
+		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 			queue = mvm->aux_queue;
 		}
 	}
@@ -673,17 +669,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		return -1;
 	}
 
-	/*
-	 * Increase the pending frames counter, so that later when a reply comes
-	 * in and the counter is decreased - we don't start getting negative
-	 * values.
-	 * Note that we don't need to make sure it isn't agg'd, since we're
-	 * TXing non-sta
-	 * For DQA mode - we shouldn't increase it though
-	 */
-	if (!iwl_mvm_is_dqa_supported(mvm))
-		atomic_inc(&mvm->pending_frames[sta_id]);
-
 	return 0;
 }
@@ -994,22 +979,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		}
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
-		txq_id = mvmsta->tid_data[tid].txq_id;
-
-	if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
-		/* default to TID 0 for non-QoS packets */
-		u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
-
-		txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
-	}
+	txq_id = mvmsta->tid_data[tid].txq_id;
 
 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
 	/* Check if TXQ needs to be allocated or re-activated */
 	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
-		     !mvmsta->tid_data[tid].is_tid_active) &&
-	    iwl_mvm_is_dqa_supported(mvm)) {
+		     !mvmsta->tid_data[tid].is_tid_active)) {
 		/* If TXQ needs to be allocated... */
 		if (txq_id == IWL_MVM_INVALID_QUEUE) {
 			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
@@ -1036,7 +1012,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 			    txq_id);
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
 		/* Keep track of the time of the last frame for this RA/TID */
 		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
@@ -1070,10 +1046,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	spin_unlock(&mvmsta->lock);
 
-	/* Increase pending frames count if this isn't AMPDU or DQA queue */
-	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
-		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
-
 	return 0;
 
 drop_unlock_sta:
@@ -1142,8 +1114,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvmsta->lock);
 
 	if ((tid_data->state == IWL_AGG_ON ||
-	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
-	     iwl_mvm_is_dqa_supported(mvm)) &&
+	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
 	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
 		/*
 		 * Now that this aggregation or DQA queue is empty tell
@@ -1177,13 +1148,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can continue DELBA flow ssn = next_recl = %d\n",
 				    tid_data->next_reclaimed);
-		if (!iwl_mvm_is_dqa_supported(mvm)) {
-			u8 mac80211_ac = tid_to_mac80211_ac[tid];
-
-			iwl_mvm_disable_txq(mvm, tid_data->txq_id,
-					    vif->hw_queue[mac80211_ac], tid,
-					    CMD_ASYNC);
-		}
 		tid_data->state = IWL_AGG_OFF;
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
@@ -1381,10 +1345,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			info->flags |= IEEE80211_TX_STAT_ACK;
 			break;
 		case TX_STATUS_FAIL_DEST_PS:
-			/* In DQA, the FW should have stopped the queue and not
+			/* the FW should have stopped the queue and not
 			 * return this status
 			 */
-			WARN_ON(iwl_mvm_is_dqa_supported(mvm));
+			WARN_ON(1);
 			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 			break;
 		default:
@@ -1440,9 +1404,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		ieee80211_tx_status(mvm->hw, skb);
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
-		/* If this is an aggregation queue, we use the ssn since:
-		 * ssn = wifi seq_num % 256.
+	/* This is an aggregation queue or might become one, so we use
+	 * the ssn since: ssn = wifi seq_num % 256.
 	 * The seq_ctl is the sequence control of the packet to which
 	 * this Tx response relates. But if there is a hole in the
 	 * bitmap of the BA we received, this Tx response may allow to
@@ -1456,10 +1419,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	 * treated (acked / dropped) + 1.
 	 */
 	next_reclaimed = ssn;
-	} else {
-		/* The next packet to be reclaimed is the one after this one */
-		next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
-	}
 
 	IWL_DEBUG_TX_REPLY(mvm,
 			   "TXQ %d status %s (0x%08x)\n",
@@ -1542,49 +1501,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		mvmsta = NULL;
 	}
 
-	/*
-	 * If the txq is not an AMPDU queue, there is no chance we freed
-	 * several skbs. Check that out...
-	 */
-	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
-		goto out;
-
-	/* We can't free more than one frame at once on a shared queue */
-	WARN_ON(skb_freed > 1);
-
-	/* If we have still frames for this STA nothing to do here */
-	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
-		goto out;
-
-	if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
-		/*
-		 * If there are no pending frames for this STA and
-		 * the tx to this station is not disabled, notify
-		 * mac80211 that this station can now wake up in its
-		 * STA table.
-		 * If mvmsta is not NULL, sta is valid.
-		 */
-		spin_lock_bh(&mvmsta->lock);
-		if (!mvmsta->disable_tx)
-			ieee80211_sta_block_awake(mvm->hw, sta, false);
-		spin_unlock_bh(&mvmsta->lock);
-	}
-
-	if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
-		/*
-		 * We are draining and this was the last packet - pre_rcu_remove
-		 * has been called already. We might be after the
-		 * synchronize_net already.
-		 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
-		 */
-		set_bit(sta_id, mvm->sta_drained);
-		schedule_work(&mvm->sta_drained_wk);
-	}
-
 out:
 	rcu_read_unlock();
 }
@@ -1648,9 +1564,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
 	struct iwl_mvm_sta *mvmsta;
 	int queue = SEQ_TO_QUEUE(sequence);
 
-	if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
-			 (!iwl_mvm_is_dqa_supported(mvm) ||
-			  (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
+	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
 		return;
 
 	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))