Commit 679955d5 authored by Kuan-Chung Chen, committed by Kalle Valo

wifi: rtw89: enable VO TX AMPDU

To improve VO throughput, enable TX AMPDU for the VO access category.

We measured the latency with VO TX AMPDU enabled and with it disabled. The
experimental results show that the difference between the two is
insignificant, only about 300 µs, so the impact on user experience can be
ignored.

Moreover, we found that some APs run into a group key handshake timeout when
a BA session is already set up on the TID used for EAPOL frames. Therefore,
when transmitting an EAPOL frame, tear down any BA session that is already
set up on its TID (a standalone sketch of this idea follows the commit
header).
Signed-off-by: Kuan-Chung Chen <damon.chen@realtek.com>
Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20220610072610.27095-7-pkshih@realtek.com
parent 29363fb6
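For readers skimming the change: the EAPOL handling described above boils down to roughly the sketch below. It is a minimal, hypothetical illustration using generic mac80211 calls, not the driver's code; the helper name drv_eapol_stop_tx_ba() is made up for this example. The actual patch (see the diff that follows) additionally parks the affected TXQs on a forbid_ba_list and re-allows BA setup from a delayed work a few seconds later.

/* Hypothetical sketch: before transmitting an EAPOL frame, tear down any
 * existing TX BA session on its TID so the frame is not sent inside an
 * A-MPDU. Assumes a mac80211 driver TX path. */
#include <linux/ieee80211.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

static void drv_eapol_stop_tx_ba(struct ieee80211_sta *sta,
				 struct sk_buff *skb)
{
	u8 tid;

	/* EAPOL frames carry the PAE ethertype (0x888E). */
	if (!sta || skb->protocol != cpu_to_be16(ETH_P_PAE))
		return;

	/* TID of the frame, taken from the QoS priority bits. */
	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;

	/* Ask mac80211 to stop any TX aggregation session on this TID;
	 * the call simply returns an error if no session is active. */
	ieee80211_stop_tx_ba_session(sta, tid);
}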
@@ -408,18 +408,30 @@ rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
 
 static void
 rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
-				struct rtw89_core_tx_request *tx_req, u8 tid)
+				struct rtw89_core_tx_request *tx_req,
+				enum btc_pkt_type pkt_type)
 {
 	struct ieee80211_sta *sta = tx_req->sta;
 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+	struct sk_buff *skb = tx_req->skb;
 	struct rtw89_sta *rtwsta;
 	u8 ampdu_num;
+	u8 tid;
+
+	if (pkt_type == PACKET_EAPOL) {
+		desc_info->bk = true;
+		return;
+	}
+
+	if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU))
+		return;
 
 	if (!sta) {
 		rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
 		return;
 	}
 
+	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
 	rtwsta = (struct rtw89_sta *)sta->drv_priv;
 
 	ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?

@@ -720,8 +732,6 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
 	/* enable wd_info for AMPDU */
 	desc_info->en_wd_info = true;
 
-	if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU)
-		rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, tid);
 	if (IEEE80211_SKB_CB(skb)->control.hw_key)
 		rtw89_core_tx_update_sec_key(rtwdev, tx_req);

@@ -832,6 +842,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
 		rtw89_core_tx_update_data_info(rtwdev, tx_req);
 		pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
 		rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
+		rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type);
 		rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
 		break;
 	case RTW89_CORE_TX_TYPE_FWCMD:

@@ -1857,6 +1868,55 @@ static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
 	spin_unlock_bh(&rtwdev->ba_lock);
 }
 
+static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
+						   struct ieee80211_sta *sta)
+{
+	struct rtw89_txq *rtwtxq, *tmp;
+
+	spin_lock_bh(&rtwdev->ba_lock);
+	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
+		struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+
+		if (sta == txq->sta) {
+			clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+			list_del_init(&rtwtxq->list);
+		}
+	}
+	spin_unlock_bh(&rtwdev->ba_lock);
+}
+
+static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
+					  struct rtw89_txq *rtwtxq)
+{
+	struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+	struct ieee80211_sta *sta = txq->sta;
+	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+
+	if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc))
+		return;
+
+	if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) ||
+	    test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+		return;
+
+	spin_lock_bh(&rtwdev->ba_lock);
+	if (!list_empty(&rtwtxq->list)) {
+		list_del_init(&rtwtxq->list);
+		goto out;
+	}
+
+	set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+	list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
+	ieee80211_stop_tx_ba_session(sta, txq->tid);
+	cancel_delayed_work(&rtwdev->forbid_ba_work);
+	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
+				     RTW89_FORBID_BA_TIMER);
+out:
+	spin_unlock_bh(&rtwdev->ba_lock);
+}
+
 static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
 				     struct rtw89_txq *rtwtxq,
 				     struct sk_buff *skb)

@@ -1866,13 +1926,15 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
 	struct ieee80211_sta *sta = txq->sta;
 	struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
 
-	if (unlikely(skb_get_queue_mapping(skb) == IEEE80211_AC_VO))
+	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+		rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
 		return;
+	}
 
-	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+	if (unlikely(!sta))
 		return;
 
-	if (unlikely(!sta))
+	if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
 		return;
 
 	if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))

@@ -2032,6 +2094,20 @@ static void rtw89_core_txq_reinvoke_work(struct work_struct *w)
 		queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
 }
 
+static void rtw89_forbid_ba_work(struct work_struct *w)
+{
+	struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
+						forbid_ba_work.work);
+	struct rtw89_txq *rtwtxq, *tmp;
+
+	spin_lock_bh(&rtwdev->ba_lock);
+	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
+		clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+		list_del_init(&rtwtxq->list);
+	}
+	spin_unlock_bh(&rtwdev->ba_lock);
+}
+
 static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
 						 u32 throughput, u64 cnt)
 {

@@ -2327,6 +2403,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
 	rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
 	rtw89_mac_bf_disassoc(rtwdev, vif, sta);
 	rtw89_core_free_sta_pending_ba(rtwdev, sta);
+	rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta);
 
 	if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
 		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
 	if (sta->tdls)

@@ -2839,6 +2916,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
 	cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
 	cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
 	cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+	cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
 
 	mutex_lock(&rtwdev->mutex);

@@ -2858,6 +2936,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
 	u8 band;
 
 	INIT_LIST_HEAD(&rtwdev->ba_list);
+	INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
 	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
 	INIT_LIST_HEAD(&rtwdev->early_h2c_list);
 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {

@@ -2873,6 +2952,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
 	INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
 	INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
 	INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
 	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
 	spin_lock_init(&rtwdev->ba_lock);
 	spin_lock_init(&rtwdev->rpwm_lock);

@@ -29,6 +29,7 @@ extern const struct ieee80211_ops rtw89_ops;
 #define INV_RF_DATA 0xffffffff
 #define RTW89_TRACK_WORK_PERIOD	round_jiffies_relative(HZ * 2)
+#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
 #define CFO_TRACK_MAX_USER 64
 #define MAX_RSSI 110
 #define RSSI_FACTOR 1

@@ -144,6 +145,7 @@ enum rtw89_core_rx_type {
 enum rtw89_txq_flags {
 	RTW89_TXQ_F_AMPDU = 0,
 	RTW89_TXQ_F_BLOCK_BA = 1,
+	RTW89_TXQ_F_FORBID_BA = 2,
 };
 
 enum rtw89_net_type {

@@ -3137,10 +3139,12 @@ struct rtw89_dev {
 	struct workqueue_struct *txq_wq;
 	struct work_struct txq_work;
 	struct delayed_work txq_reinvoke_work;
-	/* used to protect ba_list */
+	/* used to protect ba_list and forbid_ba_list */
 	spinlock_t ba_lock;
 	/* txqs to setup ba session */
 	struct list_head ba_list;
+	/* txqs to forbid ba session */
+	struct list_head forbid_ba_list;
 	struct work_struct ba_work;
 	/* used to protect rpwm */
 	spinlock_t rpwm_lock;

@@ -3187,6 +3191,7 @@ struct rtw89_dev {
 	struct delayed_work coex_bt_devinfo_work;
 	struct delayed_work coex_rfk_chk_work;
 	struct delayed_work cfo_track_work;
+	struct delayed_work forbid_ba_work;
 	struct rtw89_ppdu_sts_info ppdu_sts;
 	u8 total_sta_assoc;
 	bool scanning;