Commit f3f240f9 authored by Johannes Berg, committed by Luca Coelho

iwlwifi: mvm: remove queue_info_lock

All the queue management code runs under mvm->mutex, so there are
only a few cases where the data structures are accessed without it:
 * the TX path, which doesn't take any locks anyway
 * iwl_mvm_wake_sw_queue() and iwl_mvm_stop_sw_queue(), where we
   just (atomically) read a bitmap, so the lock isn't needed.

Therefore, we can remove the spinlock. This enables some cleanup
of the ugly locking in iwl_mvm_inactivity_check().
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 06bc6f6e
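
The scheme described above relies on every writer of the queue bookkeeping
holding mvm->mutex, while the remaining lockless readers only need a single
word-sized snapshot (one hw_queue_to_mac80211[] entry). A minimal standalone
sketch of that pattern follows; it is illustrative only, not driver code, and
all names in it are made up:

/*
 * Sketch: writers serialize on a mutex, readers do a lock-free,
 * word-sized atomic load.  Mutex initialization is omitted.
 */
#include <pthread.h>
#include <stdatomic.h>

#define NUM_HW_QUEUES 32

struct queue_map {
	pthread_mutex_t mutex;                          /* plays the role of mvm->mutex */
	_Atomic unsigned long hw_to_mac[NUM_HW_QUEUES]; /* per-queue bitmaps */
};

/* Writer side: queue management, always under the mutex. */
static void map_set(struct queue_map *m, int hwq, unsigned long mac_bitmap)
{
	pthread_mutex_lock(&m->mutex);
	atomic_store_explicit(&m->hw_to_mac[hwq], mac_bitmap,
			      memory_order_relaxed);
	pthread_mutex_unlock(&m->mutex);
}

/* Reader side: like iwl_mvm_stop_sw_queue(), a single atomic read. */
static unsigned long map_get(struct queue_map *m, int hwq)
{
	return atomic_load_explicit(&m->hw_to_mac[hwq],
				    memory_order_relaxed);
}

The relaxed atomics only guarantee a tear-free load of one word, which is all
the stop/wake paths in the hunks below need.
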
@@ -844,7 +844,6 @@ struct iwl_mvm {
 	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
 	struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
-	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
 	struct work_struct add_stream_wk; /* To add streams to queues */
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
@@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
 	INIT_LIST_HEAD(&mvm->async_handlers_list);
 	spin_lock_init(&mvm->time_event_lock);
-	spin_lock_init(&mvm->queue_info_lock);
 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -1108,11 +1107,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->hw_queue_to_mac80211[hw_queue];
-	spin_unlock_bh(&mvm->queue_info_lock);
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
 	iwl_mvm_stop_mac_queues(mvm, mq);
 }
@@ -1138,11 +1133,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->hw_queue_to_mac80211[hw_queue];
-	spin_unlock_bh(&mvm->queue_info_lock);
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
 	iwl_mvm_start_mac_queues(mvm, mq);
 }
@@ -1160,11 +1160,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	 * If we have timed-out TIDs - schedule the worker that will
 	 * reconfig the queues and update them
 	 *
-	 * Note that the mvm->queue_info_lock isn't being taken here in
-	 * order to not serialize the TX flow. This isn't dangerous
-	 * because scheduling mvm->add_stream_wk can't ruin the state,
-	 * and if we DON'T schedule it due to some race condition then
-	 * next TX we get here we will.
+	 * Note that no lock is taken here in order to not serialize
+	 * the TX flow. This isn't dangerous because scheduling
+	 * mvm->add_stream_wk can't ruin the state, and if we DON'T
+	 * schedule it due to some race condition then next TX we get
+	 * here we will.
 	 */
 	if (unlikely(mvm->queue_info[txq_id].status ==
 		     IWL_MVM_QUEUE_SHARED &&
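The comment in the hunk above argues the unlocked check is safe because
scheduling mvm->add_stream_wk is idempotent and a missed schedule is simply
retried on the next TX. A rough userspace analogue of that idempotence, with
hypothetical names (not driver code):

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for the work item's pending bit. */
static atomic_bool work_pending;

/*
 * Analogue of schedule_work(): the first caller wins and enqueues;
 * concurrent or repeated callers see a harmless no-op.  Missing a
 * call entirely is also fine -- the next TX retries the check.
 */
static bool maybe_schedule_work(void)
{
	bool expected = false;

	return atomic_compare_exchange_strong(&work_pending, &expected, true);
}

In the kernel, schedule_work() behaves the same way: queueing an
already-pending work item returns false and does nothing.
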
@@ -618,13 +618,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
-		 "Trying to reconfig unallocated queue %d\n", queue)) {
-		spin_unlock_bh(&mvm->queue_info_lock);
+		 "Trying to reconfig unallocated queue %d\n", queue))
 		return -ENXIO;
-	}
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);