Commit f3f240f9 authored by Johannes Berg, committed by Luca Coelho

iwlwifi: mvm: remove queue_info_lock

All the queue management code runs under mvm->mutex, so there are
only a few cases where the data structures are accessed without it:
 * TX path, which doesn't take any locks anyway
 * iwl_mvm_wake_sw_queue() and iwl_mvm_stop_sw_queue() where we
   just (atomically) read a bitmap, so the lock isn't needed.

Therefore, we can remove the spinlock. This enables some cleanup
in the ugly locking in iwl_mvm_inactivity_check().
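
Most of the hunks below follow one mechanical pattern: a spin_lock_bh()/spin_unlock_bh() pair around mvm->queue_info accesses is deleted, and functions that used to assert the spinlock now assert the mutex. A minimal before/after sketch of that pattern (the helper names here are hypothetical, not taken from the driver):

    /* before: every access bounced through the spinlock */
    static u8 queue_ra_sta_id_old(struct iwl_mvm *mvm, int queue)
    {
            u8 sta_id;

            spin_lock_bh(&mvm->queue_info_lock);
            sta_id = mvm->queue_info[queue].ra_sta_id;
            spin_unlock_bh(&mvm->queue_info_lock);
            return sta_id;
    }

    /* after: callers already hold mvm->mutex, so just document that
     * and let lockdep enforce it
     */
    static u8 queue_ra_sta_id_new(struct iwl_mvm *mvm, int queue)
    {
            lockdep_assert_held(&mvm->mutex);
            return mvm->queue_info[queue].ra_sta_id;
    }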
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 06bc6f6e
@@ -844,7 +844,6 @@ struct iwl_mvm {
         u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
         struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
-        spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
         struct work_struct add_stream_wk; /* To add streams to queues */
         atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
...
@@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
         INIT_LIST_HEAD(&mvm->aux_roc_te_list);
         INIT_LIST_HEAD(&mvm->async_handlers_list);
         spin_lock_init(&mvm->time_event_lock);
-        spin_lock_init(&mvm->queue_info_lock);
 
         INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
         INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -1108,11 +1107,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-        unsigned long mq;
-
-        spin_lock_bh(&mvm->queue_info_lock);
-        mq = mvm->hw_queue_to_mac80211[hw_queue];
-        spin_unlock_bh(&mvm->queue_info_lock);
+        unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
         iwl_mvm_stop_mac_queues(mvm, mq);
 }
@@ -1138,11 +1133,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-        unsigned long mq;
-
-        spin_lock_bh(&mvm->queue_info_lock);
-        mq = mvm->hw_queue_to_mac80211[hw_queue];
-        spin_unlock_bh(&mvm->queue_info_lock);
+        unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
         iwl_mvm_start_mac_queues(mvm, mq);
 }
...
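Both of these callbacks run outside mvm->mutex, and after this change they read hw_queue_to_mac80211[] with no lock at all. That is safe because the load is a single aligned unsigned long, so it cannot tear; the worst case is a slightly stale bitmap, which stops or wakes a mac80211 queue one TX later. A sketch with the lockless intent spelled out (the committed code uses a plain load; READ_ONCE() is an illustrative addition, not part of the patch):

    static void iwl_mvm_wake_sw_queue_sketch(struct iwl_op_mode *op_mode,
                                             int hw_queue)
    {
            struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
            /* one aligned word, read once: no tearing, only staleness */
            unsigned long mq = READ_ONCE(mvm->hw_queue_to_mac80211[hw_queue]);

            iwl_mvm_start_mac_queues(mvm, mq);
    }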
@@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return -EINVAL;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         sta_id = mvm->queue_info[queue].ra_sta_id;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         rcu_read_lock();
@@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
                 return -EINVAL;
 
         if (iwl_mvm_has_new_tx_api(mvm)) {
-                spin_lock_bh(&mvm->queue_info_lock);
-
                 if (remove_mac_queue)
                         mvm->hw_queue_to_mac80211[queue] &=
                                 ~BIT(mac80211_queue);
-
-                spin_unlock_bh(&mvm->queue_info_lock);
 
                 iwl_trans_txq_free(mvm->trans, queue);
 
                 return 0;
         }
 
-        spin_lock_bh(&mvm->queue_info_lock);
-
-        if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
-                spin_unlock_bh(&mvm->queue_info_lock);
+        if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
                 return 0;
-        }
 
         mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
@@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
                             mvm->hw_queue_to_mac80211[queue]);
 
         /* If the queue is still enabled - nothing left to do in this func */
-        if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
-                spin_unlock_bh(&mvm->queue_info_lock);
+        if (cmd.action == SCD_CFG_ENABLE_QUEUE)
                 return 0;
-        }
 
         cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
         cmd.tid = mvm->queue_info[queue].txq_tid;
@@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
         /* Regardless if this is a reserved TXQ for a STA - mark it as false */
         mvm->queue_info[queue].reserved = false;
 
-        spin_unlock_bh(&mvm->queue_info_lock);
-
         iwl_trans_txq_disable(mvm->trans, queue, false);
         ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
                                    sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
@@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return -EINVAL;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         sta_id = mvm->queue_info[queue].ra_sta_id;
         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                         lockdep_is_held(&mvm->mutex));
@@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return -EINVAL;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         sta_id = mvm->queue_info[queue].ra_sta_id;
         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         rcu_read_lock();
@@ -572,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return -EINVAL;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
         sta_id = mvm->queue_info[queue].ra_sta_id;
         tid = mvm->queue_info[queue].txq_tid;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         same_sta = sta_id == new_sta_id;
@@ -620,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
          * by the inactivity checker.
          */
         lockdep_assert_held(&mvm->mutex);
-        lockdep_assert_held(&mvm->queue_info_lock);
 
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return -EINVAL;
@@ -706,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
          * value 3 and VO with value 0, so to check if ac X is lower than ac Y
          * we need to check if the numerical value of X is LARGER than of Y.
          */
-        spin_lock_bh(&mvm->queue_info_lock);
         if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
-                spin_unlock_bh(&mvm->queue_info_lock);
-
                 IWL_DEBUG_TX_QUEUES(mvm,
                                     "No redirection needed on TXQ #%d\n",
                                     queue);
@@ -721,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
         cmd.tid = mvm->queue_info[queue].txq_tid;
         mq = mvm->hw_queue_to_mac80211[queue];
         shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                             queue, iwl_mvm_ac_to_tx_fifo[ac]);
@@ -747,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
         iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
 
         /* Update the TID "owner" of the queue */
-        spin_lock_bh(&mvm->queue_info_lock);
         mvm->queue_info[queue].txq_tid = tid;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
@@ -758,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                              cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
 
         /* Update AC marking of the queue */
-        spin_lock_bh(&mvm->queue_info_lock);
         mvm->queue_info[queue].mac80211_ac = ac;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         /*
          * Mark queue as shared in transport if shared
@@ -783,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
 {
         int i;
 
-        lockdep_assert_held(&mvm->queue_info_lock);
+        lockdep_assert_held(&mvm->mutex);
 
         /* This should not be hit with new TX path */
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -863,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
 {
         bool enable_queue = true;
 
-        spin_lock_bh(&mvm->queue_info_lock);
-
         /* Make sure this TID isn't already enabled */
         if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
-                spin_unlock_bh(&mvm->queue_info_lock);
                 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
                         queue, tid);
                 return false;
@@ -903,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
                             queue, mvm->queue_info[queue].tid_bitmap,
                             mvm->hw_queue_to_mac80211[queue]);
 
-        spin_unlock_bh(&mvm->queue_info_lock);
-
         return enable_queue;
 }
@@ -959,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                 return;
@@ -978,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
                 return;
         }
 
-        spin_lock_bh(&mvm->queue_info_lock);
         mvm->queue_info[queue].txq_tid = tid;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                             queue, tid);
 }
@@ -1002,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
         lockdep_assert_held(&mvm->mutex);
 
-        spin_lock_bh(&mvm->queue_info_lock);
         sta_id = mvm->queue_info[queue].ra_sta_id;
         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         /* Find TID for queue, and make sure it is the only one on the queue */
         tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
@@ -1062,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
                 }
         }
 
-        spin_lock_bh(&mvm->queue_info_lock);
         mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
-        spin_unlock_bh(&mvm->queue_info_lock);
 }
 
 /*
@@ -1083,7 +1041,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
         int tid;
 
         lockdep_assert_held(&mvmsta->lock);
-        lockdep_assert_held(&mvm->queue_info_lock);
+        lockdep_assert_held(&mvm->mutex);
 
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return false;
@@ -1184,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
         if (iwl_mvm_has_new_tx_api(mvm))
                 return -ENOSPC;
 
-        spin_lock_bh(&mvm->queue_info_lock);
-
         rcu_read_lock();
 
         /* we skip the CMD queue below by starting at 1 */
@@ -1240,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
                 mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-                /* this isn't so nice, but works OK due to the way we loop */
-                spin_unlock(&mvm->queue_info_lock);
-
-                /* and we need this locking order */
-                spin_lock(&mvmsta->lock);
-                spin_lock(&mvm->queue_info_lock);
+                spin_lock_bh(&mvmsta->lock);
                 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
                                                    inactive_tid_bitmap,
                                                    &unshare_queues,
@@ -1253,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
                 if (ret >= 0 && free_queue < 0)
                         free_queue = ret;
-                /* only unlock sta lock - we still need the queue info lock */
-                spin_unlock(&mvmsta->lock);
+                spin_unlock_bh(&mvmsta->lock);
         }
 
         rcu_read_unlock();
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         /* Reconfigure queues requiring reconfiguation */
         for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
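The "ugly locking" the commit message mentions is what these two hunks delete: the loop entered with queue_info_lock held, but lock ordering required taking mvmsta->lock first, so the spinlock had to be dropped and retaken for every station. With mvm->mutex guarding queue_info, the loop body reduces to a single lock pair (condensed from the hunks above; the trailing arguments are elided as in the diff):

    spin_lock_bh(&mvmsta->lock);
    ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
                                       inactive_tid_bitmap,
                                       &unshare_queues, ...);
    spin_unlock_bh(&mvmsta->lock);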
@@ -1306,8 +1256,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
         tfd_queue_mask = mvmsta->tfd_queue_msk;
         spin_unlock_bh(&mvmsta->lock);
 
-        spin_lock_bh(&mvm->queue_info_lock);
-
         /*
          * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
          * exists
@@ -1337,12 +1285,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                                 IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                 IWL_MVM_DQA_MAX_DATA_QUEUE);
                 if (queue < 0) {
-                        spin_unlock_bh(&mvm->queue_info_lock);
-
                         /* try harder - perhaps kill an inactive queue */
                         queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
-
-                        spin_lock_bh(&mvm->queue_info_lock);
                 }
 
                 /* No free queue - we'll have to share */
@@ -1363,8 +1307,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
         if (queue > 0 && !shared_queue)
                 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 
-        spin_unlock_bh(&mvm->queue_info_lock);
-
         /* This shouldn't happen - out of queues */
         if (WARN_ON(queue <= 0)) {
                 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
@@ -1566,8 +1508,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
         /* run the general cleanup/unsharing of queues */
         iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
 
-        spin_lock_bh(&mvm->queue_info_lock);
-
         /* Make sure we have free resources for this STA */
         if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
             !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
@@ -1579,19 +1519,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                         IWL_MVM_DQA_MIN_DATA_QUEUE,
                                         IWL_MVM_DQA_MAX_DATA_QUEUE);
         if (queue < 0) {
-                spin_unlock_bh(&mvm->queue_info_lock);
-
                 /* try again - this time kick out a queue if needed */
                 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
                 if (queue < 0) {
                         IWL_ERR(mvm, "No available queues for new station\n");
                         return -ENOSPC;
                 }
-
-                spin_lock_bh(&mvm->queue_info_lock);
         }
         mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         mvmsta->reserved_queue = queue;
 
         IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
@@ -2014,18 +1950,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                  * is still marked as IWL_MVM_QUEUE_RESERVED, and
                  * should be manually marked as free again
                  */
-                spin_lock_bh(&mvm->queue_info_lock);
                 status = &mvm->queue_info[reserved_txq].status;
                 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
                          (*status != IWL_MVM_QUEUE_FREE),
                          "sta_id %d reserved txq %d status %d",
-                         sta_id, reserved_txq, *status)) {
-                        spin_unlock_bh(&mvm->queue_info_lock);
+                         sta_id, reserved_txq, *status))
                         return -EINVAL;
-                }
 
                 *status = IWL_MVM_QUEUE_FREE;
-                spin_unlock_bh(&mvm->queue_info_lock);
         }
 
         if (vif->type == NL80211_IFTYPE_STATION &&
@@ -2883,8 +2815,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 return -EIO;
         }
 
-        spin_lock(&mvm->queue_info_lock);
-
         /*
          * Note the possible cases:
          *  1. An enabled TXQ - TXQ needs to become agg'ed
@@ -2899,7 +2829,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 if (txq_id < 0) {
                         ret = txq_id;
                         IWL_ERR(mvm, "Failed to allocate agg queue\n");
-                        goto release_locks;
+                        goto out;
                 }
 
                 /* TXQ hasn't yet been enabled, so mark it only as reserved */
@@ -2910,11 +2840,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 IWL_DEBUG_TX_QUEUES(mvm,
                                     "Can't start tid %d agg on shared queue!\n",
                                     tid);
-                goto release_locks;
+                goto out;
         }
 
-        spin_unlock(&mvm->queue_info_lock);
-
         IWL_DEBUG_TX_QUEUES(mvm,
                             "AGG for tid %d will be on queue #%d\n",
                             tid, txq_id);
@@ -2945,10 +2873,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         }
 
         ret = 0;
-        goto out;
 
-release_locks:
-        spin_unlock(&mvm->queue_info_lock);
 out:
         spin_unlock_bh(&mvmsta->lock);
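These three hunks also retire a goto label: early exits in iwl_mvm_sta_tx_agg_start() used to need release_locks to drop the spinlock before the common out label dropped the sta lock. A hypothetical condensation of the new control flow (function name invented for illustration; mvmsta->lock is assumed held on entry, as in the real function):

    static int agg_start_exit_sketch(struct iwl_mvm_sta *mvmsta, int txq_id)
    {
            int ret = 0;

            if (txq_id < 0) {
                    ret = txq_id;
                    goto out;       /* was: goto release_locks */
            }

    out:                            /* release_locks collapsed into out */
            spin_unlock_bh(&mvmsta->lock);
            return ret;
    }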
@@ -3017,9 +2942,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-        spin_lock_bh(&mvm->queue_info_lock);
         queue_status = mvm->queue_info[queue].status;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         /* Maybe there is no need to even alloc a queue... */
         if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
@@ -3065,9 +2988,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         }
 
         /* No need to mark as reserved */
-        spin_lock_bh(&mvm->queue_info_lock);
         mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
-        spin_unlock_bh(&mvm->queue_info_lock);
 
 out:
         /*
@@ -3093,10 +3014,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
 {
         u16 txq_id = tid_data->txq_id;
 
+        lockdep_assert_held(&mvm->mutex);
+
         if (iwl_mvm_has_new_tx_api(mvm))
                 return;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         /*
          * The TXQ is marked as reserved only if no traffic came through yet
          * This means no traffic has been sent on this TID (agg'd or not), so
@@ -3108,8 +3030,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
                 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
                 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
         }
-        spin_unlock_bh(&mvm->queue_info_lock);
 }
 
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
...
@@ -1160,11 +1160,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
          * If we have timed-out TIDs - schedule the worker that will
          * reconfig the queues and update them
          *
-         * Note that the mvm->queue_info_lock isn't being taken here in
-         * order to not serialize the TX flow. This isn't dangerous
-         * because scheduling mvm->add_stream_wk can't ruin the state,
-         * and if we DON'T schedule it due to some race condition then
-         * next TX we get here we will.
+         * Note that no lock is taken here in order to not serialize
+         * the TX flow. This isn't dangerous because scheduling
+         * mvm->add_stream_wk can't ruin the state, and if we DON'T
+         * schedule it due to some race condition then next TX we get
+         * here we will.
          */
         if (unlikely(mvm->queue_info[txq_id].status ==
                                         IWL_MVM_QUEUE_SHARED &&
...
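The reworded comment documents a deliberately lockless check on the TX hot path: a stale read can at worst skip scheduling add_stream_wk once, and the next TX on the same queue repeats the check. Condensed from the surrounding iwl_mvm_tx_mpdu() code (the helper name follows the full source at this point, which the excerpt truncates):

    if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED &&
                 iwl_mvm_txq_should_update(mvm, txq_id)))
            schedule_work(&mvm->add_stream_wk);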
@@ -618,13 +618,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                 return -EINVAL;
 
-        spin_lock_bh(&mvm->queue_info_lock);
         if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
-                 "Trying to reconfig unallocated queue %d\n", queue)) {
-                spin_unlock_bh(&mvm->queue_info_lock);
+                 "Trying to reconfig unallocated queue %d\n", queue))
                 return -ENXIO;
-        }
-        spin_unlock_bh(&mvm->queue_info_lock);
 
         IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
...
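This hunk (like the iwl_mvm_rm_sta() one above) also shrinks because WARN(cond, ...) returns the value of cond: once there is no unlock to pair with it, the braced error block collapses into a single statement, as the new code shows:

    if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
             "Trying to reconfig unallocated queue %d\n", queue))
            return -ENXIO;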