Commit 0f8084cd authored by Mordechay Goodstein's avatar Mordechay Goodstein Committed by Johannes Berg

iwlwifi: mvm: avoid races in rate init and rate perform

Rate perform uses the lq_sta table to calculate the next rate to scale,
while rate init resets the same table.

Rate perform is done in soft irq context, in parallel to rate init,
which can be called when changes occur, such as the AP changing bandwidth
or the station moving from the auth to the assoc state.
Signed-off-by: Mordechay Goodstein <mordechay.goodstein@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent cfb21b11
...@@ -1197,6 +1197,27 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr) ...@@ -1197,6 +1197,27 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
return tid; return tid;
} }
/*
 * iwl_mvm_rs_init_wk() - deferred rate-scaling re-initialization worker.
 *
 * Scheduled (via mvmsta->rs_init_wk) instead of calling
 * iwl_mvm_rs_rate_init() directly from the tx-status path, so the
 * re-init runs in process context rather than in soft irq.
 *
 * @wk: the rs_init_wk member embedded in struct iwl_mvm_sta.
 */
void iwl_mvm_rs_init_wk(struct work_struct *wk)
{
/* Recover the owning station from the embedded work item. */
struct iwl_mvm_sta *mvmsta = container_of(wk, struct iwl_mvm_sta,
rs_init_wk);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
struct ieee80211_sta *sta;
/* fw_id_to_mac_id[] is an RCU-protected mapping; hold the read lock
 * across the lookup and the use of the returned sta pointer.
 */
rcu_read_lock();
sta = rcu_dereference(mvmvif->mvm->fw_id_to_mac_id[mvmsta->sta_id]);
/* The station may already have been removed; warn once and bail. */
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return;
}
/* NOTE(review): on the non-offload path iwl_mvm_rs_rate_init() takes
 * lq_sta.rs_drv.mutex, and mutex_lock() may sleep — sleeping inside an
 * RCU read-side critical section is not allowed. Confirm this path, or
 * use a non-sleeping lock / drop RCU before the mutex.
 */
iwl_mvm_rs_rate_init(mvmvif->mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
rcu_read_unlock();
}
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info, bool ndp) int tid, struct ieee80211_tx_info *info, bool ndp)
{ {
...@@ -1269,7 +1290,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -1269,7 +1290,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
(unsigned long)(lq_sta->last_tx + (unsigned long)(lq_sta->last_tx +
(IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
iwl_mvm_rs_rate_init(mvm, sta, info->band, true); schedule_work(&mvmsta->rs_init_wk);
return; return;
} }
lq_sta->last_tx = jiffies; lq_sta->last_tx = jiffies;
...@@ -1442,16 +1463,24 @@ static void rs_drv_mac80211_tx_status(void *mvm_r, ...@@ -1442,16 +1463,24 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
struct iwl_op_mode *op_mode = mvm_r; struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!iwl_mvm_sta_from_mac80211(sta)->vif) if (!mvmsta->vif)
return; return;
if (!ieee80211_is_data(hdr->frame_control) || if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK) info->flags & IEEE80211_TX_CTL_NO_ACK)
return; return;
/* If it's locked we are in middle of init flow
* just wait for next tx status to update the lq_sta data
*/
if (!mutex_trylock(&mvmsta->lq_sta.rs_drv.mutex))
return;
iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info, iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
ieee80211_is_qos_nullfunc(hdr->frame_control)); ieee80211_is_qos_nullfunc(hdr->frame_control));
mutex_unlock(&mvmsta->lq_sta.rs_drv.mutex);
} }
/* /*
...@@ -4132,10 +4161,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = { ...@@ -4132,10 +4161,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
/*
 * iwl_mvm_rs_rate_init() - (re)initialize rate scaling for a station.
 *
 * Dispatches to the firmware TLC-offload implementation when available;
 * otherwise runs the driver rate-scaling init under the per-station
 * rs_drv mutex, serializing against concurrent lq_sta table updates
 * from the tx-status path.
 */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			  enum nl80211_band band, bool update)
{
	struct iwl_mvm_sta *mvmsta;

	if (iwl_mvm_has_tlc_offload(mvm)) {
		rs_fw_rate_init(mvm, sta, band, update);
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* avoid racing with rate perform reading the same lq_sta table */
	mutex_lock(&mvmsta->lq_sta.rs_drv.mutex);
	rs_drv_rate_init(mvm, sta, band, update);
	mutex_unlock(&mvmsta->lq_sta.rs_drv.mutex);
}
int iwl_mvm_rate_control_register(void) int iwl_mvm_rate_control_register(void)
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation * Copyright(c) 2018 - 2019 Intel Corporation
* *
* Contact Information: * Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com> * Intel Linux Wireless <linuxwifi@intel.com>
...@@ -376,6 +376,9 @@ struct iwl_lq_sta { ...@@ -376,6 +376,9 @@ struct iwl_lq_sta {
/* tx power reduce for this sta */ /* tx power reduce for this sta */
int tpc_reduce; int tpc_reduce;
/* avoid races of reinit and update table from rx_tx */
struct mutex mutex;
/* persistent fields - initialized only once - keep last! */ /* persistent fields - initialized only once - keep last! */
struct lq_sta_pers { struct lq_sta_pers {
#ifdef CONFIG_MAC80211_DEBUGFS #ifdef CONFIG_MAC80211_DEBUGFS
...@@ -440,6 +443,8 @@ struct iwl_mvm_sta; ...@@ -440,6 +443,8 @@ struct iwl_mvm_sta;
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable); bool enable);
void iwl_mvm_rs_init_wk(struct work_struct *wk);
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm); void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
#endif #endif
......
...@@ -1684,6 +1684,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ...@@ -1684,6 +1684,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
*/ */
if (iwl_mvm_has_tlc_offload(mvm)) if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta); iwl_mvm_rs_add_sta(mvm, mvm_sta);
else
mutex_init(&mvm_sta->lq_sta.rs_drv.mutex);
INIT_WORK(&mvm_sta->rs_init_wk, iwl_mvm_rs_init_wk);
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
...@@ -1846,6 +1850,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ...@@ -1846,6 +1850,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (ret) if (ret)
return ret; return ret;
cancel_work_sync(&mvm_sta->rs_init_wk);
/* flush its queues here since we are freeing mvm_sta */ /* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
if (ret) if (ret)
......
...@@ -424,6 +424,7 @@ struct iwl_mvm_sta { ...@@ -424,6 +424,7 @@ struct iwl_mvm_sta {
struct iwl_lq_sta_rs_fw rs_fw; struct iwl_lq_sta_rs_fw rs_fw;
struct iwl_lq_sta rs_drv; struct iwl_lq_sta rs_drv;
} lq_sta; } lq_sta;
struct work_struct rs_init_wk;
struct ieee80211_vif *vif; struct ieee80211_vif *vif;
struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data; struct iwl_mvm_rxq_dup_data *dup_data;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment