Commit 4d4183c4 authored by Emmanuel Grumbach, committed by Luca Coelho

iwlwifi: mvm: remove the tx defer for d0i3

This is not needed anymore
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 58d3bef4
@@ -742,42 +742,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
         return ret;
 }
 
-static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
-                             struct ieee80211_sta *sta,
-                             struct sk_buff *skb)
-{
-        struct iwl_mvm_sta *mvmsta;
-        bool defer = false;
-
-        /*
-         * double check the IN_D0I3 flag both before and after
-         * taking the spinlock, in order to prevent taking
-         * the spinlock when not needed.
-         */
-        if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
-                return false;
-
-        spin_lock(&mvm->d0i3_tx_lock);
-        /*
-         * testing the flag again ensures the skb dequeue
-         * loop (on d0i3 exit) hasn't run yet.
-         */
-        if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-                goto out;
-
-        mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-        if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
-            mvmsta->sta_id != mvm->d0i3_ap_sta_id)
-                goto out;
-
-        __skb_queue_tail(&mvm->d0i3_tx, skb);
-        defer = true;
-
-out:
-        spin_unlock(&mvm->d0i3_tx_lock);
-        return defer;
-}
-
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                            struct ieee80211_tx_control *control,
                            struct sk_buff *skb)
@@ -822,8 +786,6 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
         }
 
         if (sta) {
-                if (iwl_mvm_defer_tx(mvm, sta, skb))
-                        return;
                 if (iwl_mvm_tx_skb(mvm, skb, sta))
                         goto drop;
                 return;
@@ -1156,9 +1118,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
                  * would do.
                  */
                 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
-#ifdef CONFIG_PM
-                iwl_mvm_d0i3_enable_tx(mvm, NULL);
-#endif
         }
 
         return ret;
@@ -1196,9 +1155,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
         mutex_lock(&mvm->mutex);
         clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
-#ifdef CONFIG_PM
-        iwl_mvm_d0i3_enable_tx(mvm, NULL);
-#endif
+
         ret = iwl_mvm_update_quotas(mvm, true, NULL);
         if (ret)
                 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
...
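For context, the removed iwl_mvm_defer_tx() above used a double-checked flag: test IWL_MVM_STATUS_IN_D0I3 locklessly on the hot TX path, then re-test it under d0i3_tx_lock before queueing the frame, so the common (not-in-d0i3) case never takes the spinlock. Below is a minimal userspace sketch of that pattern, assuming plain C with pthreads; the pkt struct, defer_tx(), drain() and send_pkt() are stand-ins for illustration, not iwlwifi or mac80211 APIs.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for an skb. */
struct pkt {
        struct pkt *next;
        int id;
};

static atomic_bool suspended;                  /* plays the role of IWL_MVM_STATUS_IN_D0I3 */
static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *defer_head, *defer_tail;    /* plays the role of mvm->d0i3_tx */

/* Returns true if @p was deferred; false means "send it now". */
static bool defer_tx(struct pkt *p)
{
        bool defer = false;

        /* Fast path: flag clear, do not touch the lock at all. */
        if (!atomic_load(&suspended))
                return false;

        pthread_mutex_lock(&defer_lock);
        /* Re-check under the lock: the drain loop may already have run. */
        if (atomic_load(&suspended)) {
                p->next = NULL;
                if (defer_tail)
                        defer_tail->next = p;
                else
                        defer_head = p;
                defer_tail = p;
                defer = true;
        }
        pthread_mutex_unlock(&defer_lock);
        return defer;
}

/* Exit path: flush everything that was deferred, then clear the flag. */
static void drain(void (*send)(struct pkt *))
{
        pthread_mutex_lock(&defer_lock);
        while (defer_head) {
                struct pkt *p = defer_head;

                defer_head = p->next;
                send(p);
        }
        defer_tail = NULL;
        atomic_store(&suspended, false);
        pthread_mutex_unlock(&defer_lock);
}

static void send_pkt(struct pkt *p)
{
        printf("sending pkt %d\n", p->id);
}

int main(void)
{
        struct pkt a = { .id = 1 }, b = { .id = 2 };

        atomic_store(&suspended, true);
        printf("pkt 1 deferred: %d\n", defer_tx(&a));  /* 1: queued */
        printf("pkt 2 deferred: %d\n", defer_tx(&b));  /* 1: queued */
        drain(send_pkt);                               /* flushes both */
        printf("pkt 1 deferred: %d\n", defer_tx(&a));  /* 0: send now */
        return 0;
}

The drain side clears the flag while still holding the lock, which is what makes the re-check in defer_tx() sufficient: once drain() has run, no new packet can slip into the queue unseen.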
@@ -1019,12 +1019,9 @@ struct iwl_mvm {
         u8 d0i3_ap_sta_id;
         bool d0i3_offloading;
         struct work_struct d0i3_exit_work;
-        struct sk_buff_head d0i3_tx;
         /* protect d0i3_suspend_flags */
         struct mutex d0i3_suspend_mutex;
         unsigned long d0i3_suspend_flags;
-        /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
-        spinlock_t d0i3_tx_lock;
         wait_queue_head_t d0i3_exit_waitq;
         wait_queue_head_t rx_sync_waitq;
@@ -1861,7 +1858,6 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                                u32 cmd_flags);
 
 #ifdef CONFIG_PM
-void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
 int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
 int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
 int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
...
@@ -713,8 +713,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
         INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
         INIT_LIST_HEAD(&mvm->add_stream_txqs);
 
-        spin_lock_init(&mvm->d0i3_tx_lock);
-        skb_queue_head_init(&mvm->d0i3_tx);
         init_waitqueue_head(&mvm->d0i3_exit_waitq);
         init_waitqueue_head(&mvm->rx_sync_waitq);
@@ -1590,62 +1588,6 @@ static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
         iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
 }
 
-void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
-{
-        struct ieee80211_sta *sta = NULL;
-        struct iwl_mvm_sta *mvm_ap_sta;
-        int i;
-        bool wake_queues = false;
-
-        lockdep_assert_held(&mvm->mutex);
-
-        spin_lock_bh(&mvm->d0i3_tx_lock);
-
-        if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
-                goto out;
-
-        IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
-
-        /* get the sta in order to update seq numbers and re-enqueue skbs */
-        sta = rcu_dereference_protected(
-                        mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
-                        lockdep_is_held(&mvm->mutex));
-
-        if (IS_ERR_OR_NULL(sta)) {
-                sta = NULL;
-                goto out;
-        }
-
-        if (mvm->d0i3_offloading && qos_seq) {
-                /* update qos seq numbers if offloading was enabled */
-                mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
-
-                for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-                        u16 seq = le16_to_cpu(qos_seq[i]);
-                        /* firmware stores last-used one, we store next one */
-                        seq += 0x10;
-                        mvm_ap_sta->tid_data[i].seq_number = seq;
-                }
-        }
-out:
-        /* re-enqueue (or drop) all packets */
-        while (!skb_queue_empty(&mvm->d0i3_tx)) {
-                struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
-
-                if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
-                        ieee80211_free_txskb(mvm->hw, skb);
-
-                /* if the skb_queue is not empty, we need to wake queues */
-                wake_queues = true;
-        }
-
-        clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-        wake_up(&mvm->d0i3_exit_waitq);
-        mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-        if (wake_queues)
-                ieee80211_wake_queues(mvm->hw);
-
-        spin_unlock_bh(&mvm->d0i3_tx_lock);
-}
-
 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 {
         struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
@@ -1655,7 +1597,6 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
         struct iwl_wowlan_status *status;
         u32 wakeup_reasons = 0;
-        __le16 *qos_seq = NULL;
 
         mutex_lock(&mvm->mutex);
@@ -1667,7 +1608,6 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
         }
 
         wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
-        qos_seq = status->qos_seq_ctr;
 
         IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
@@ -1678,12 +1618,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
                                            iwl_mvm_d0i3_exit_work_iter,
                                            &iter_data);
 out:
-        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
-
         IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
                        wakeup_reasons);
 
-        /* qos_seq might point inside resp_pkt, so free it only now */
         kfree(status);
 
         /* the FW might have updated the regdomain */
...
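A side note on the removed iwl_mvm_d0i3_enable_tx(): the "seq += 0x10" it performed when restoring QoS sequence numbers follows from the 802.11 sequence-control layout, where bits 0-3 hold the fragment number and bits 4-15 hold the sequence number, so "advance to the next sequence number" means adding 0x10 to the raw 16-bit field (mac80211 exposes the extraction as IEEE80211_SEQ_TO_SN). Here is a tiny standalone illustration with made-up values; the SEQ_TO_SN macro below is a local helper, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Sequence number lives in bits 4..15 of the 802.11 sequence-control field. */
#define SEQ_TO_SN(seq) (((seq) & 0xfff0) >> 4)

int main(void)
{
        uint16_t last_used = 0x0150;            /* example value reported by firmware */
        uint16_t next = last_used + 0x10;       /* "firmware stores last-used one, we store next one" */

        printf("last SN used: %u, next SN to store: %u\n",
               (unsigned)SEQ_TO_SN(last_used), (unsigned)SEQ_TO_SN(next));
        return 0;
}

With these values, the firmware reports that sequence number 21 was the last one used, so the driver would store 22 as the next one to transmit.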