Commit de24f638 authored by Liad Kaufman, committed by Luca Coelho

iwlwifi: mvm: allocate queue for probe response in dqa mode

In DQA mode, allocate a dedicated queue (#9) for P2P GO/soft
AP probe responses.
Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent a525d0ea
@@ -97,6 +97,8 @@ enum {
  * Each MGMT queue is mapped to a single STA
  * MGMT frames are frames that return true on ieee80211_is_mgmt()
  * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
+ *	responses
  * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
  *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
  *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
@@ -109,6 +111,7 @@ enum iwl_mvm_dqa_txq {
 	IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
 	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
 	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
 	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
 	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
 };
......
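For readers new to DQA, the change above slots the new queue into the gap
between the MGMT pool (queues 5-8) and the DATA pool (queues 10-31). The
standalone sketch below is not driver code: it copies only the queue numbers
visible in this hunk (under made-up SKETCH_ names) and classifies a TXQ number
into its pool, purely to make the reserved slot at #9 explicit.

/* Illustrative sketch of the DQA TXQ layout; values mirror the hunk above. */
#include <stdio.h>

enum sketch_dqa_txq {
	SKETCH_DQA_BSS_CLIENT_QUEUE	= 4,
	SKETCH_DQA_MIN_MGMT_QUEUE	= 5,
	SKETCH_DQA_MAX_MGMT_QUEUE	= 8,
	SKETCH_DQA_AP_PROBE_RESP_QUEUE	= 9,	/* reserved by this patch */
	SKETCH_DQA_MIN_DATA_QUEUE	= 10,
	SKETCH_DQA_MAX_DATA_QUEUE	= 31,
};

static const char *sketch_pool_name(int txq)
{
	if (txq == SKETCH_DQA_AP_PROBE_RESP_QUEUE)
		return "P2P GO/SoftAP probe responses";
	if (txq >= SKETCH_DQA_MIN_MGMT_QUEUE &&
	    txq <= SKETCH_DQA_MAX_MGMT_QUEUE)
		return "MGMT pool";
	if (txq >= SKETCH_DQA_MIN_DATA_QUEUE &&
	    txq <= SKETCH_DQA_MAX_DATA_QUEUE)
		return "DATA pool";
	return "other (single-purpose queue)";
}

int main(void)
{
	int txq;

	for (txq = SKETCH_DQA_BSS_CLIENT_QUEUE;
	     txq <= SKETCH_DQA_MIN_DATA_QUEUE + 1; txq++)
		printf("TXQ %2d: %s\n", txq, sketch_pool_name(txq));
	return 0;
}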
@@ -540,6 +540,12 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	case NL80211_IFTYPE_AP:
 		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
 				    IWL_MAX_TID_COUNT, 0);
+
+		if (iwl_mvm_is_dqa_supported(mvm))
+			iwl_mvm_disable_txq(mvm,
+					    IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+					    vif->hw_queue[0], IWL_MAX_TID_COUNT,
+					    0);
 		/* fall through */
 	default:
 		/*
......
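The hunk above is the teardown half of the new queue's lifetime: queue #9 is
enabled when the broadcast station is added for an AP/GO interface (see the
sta.c hunk below) and must be disabled again when the MAC context is released.
The toy model below is not driver code; a bitmap stands in for the transport's
active-queue state, just to mirror that pairing.

/* Toy model of the enable/disable pairing; the bitmap is a stand-in. */
#include <stdio.h>

#define SKETCH_AP_PROBE_RESP_QUEUE	9

static unsigned long sketch_active_txqs;

static void sketch_enable_txq(int queue)
{
	sketch_active_txqs |= 1UL << queue;	/* add-bcast-STA path */
}

static void sketch_disable_txq(int queue)
{
	sketch_active_txqs &= ~(1UL << queue);	/* mac-ctxt-release path */
}

int main(void)
{
	sketch_enable_txq(SKETCH_AP_PROBE_RESP_QUEUE);	/* AP/GO start */
	sketch_disable_txq(SKETCH_AP_PROBE_RESP_QUEUE);	/* AP/GO stop */

	printf("queue %d leaked: %s\n", SKETCH_AP_PROBE_RESP_QUEUE,
	       (sketch_active_txqs >> SKETCH_AP_PROBE_RESP_QUEUE) & 1 ?
	       "yes" : "no");
	return 0;
}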
@@ -1000,6 +1000,29 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		struct iwl_trans_txq_scd_cfg cfg = {
+			.fifo = IWL_MVM_TX_FIFO_VO,
+			.sta_id = mvmvif->bcast_sta.sta_id,
+			.tid = IWL_MAX_TID_COUNT,
+			.aggregate = false,
+			.frame_limit = IWL_FRAME_LIMIT,
+		};
+		unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+		int queue;
+
+		if ((vif->type == NL80211_IFTYPE_AP) &&
+		    (mvmvif->bcast_sta.tfd_queue_msk &
+		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
+			return -EINVAL;
+
+		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
+				   wdg_timeout);
+	}
+
 	if (vif->type == NL80211_IFTYPE_ADHOC)
 		baddr = vif->bss_conf.bssid;
 
@@ -1028,20 +1051,25 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 qmask;
+	u32 qmask = 0;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	qmask = iwl_mvm_mac_get_queues_mask(vif);
+	if (!iwl_mvm_is_dqa_supported(mvm))
+		qmask = iwl_mvm_mac_get_queues_mask(vif);
 
-	/*
-	 * The firmware defines the TFD queue mask to only be relevant
-	 * for *unicast* queues, so the multicast (CAB) queue shouldn't
-	 * be included.
-	 */
-	if (vif->type == NL80211_IFTYPE_AP)
+	if (vif->type == NL80211_IFTYPE_AP) {
+		/*
+		 * The firmware defines the TFD queue mask to only be relevant
+		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
+		 * be included.
+		 */
 		qmask &= ~BIT(vif->cab_queue);
 
+		if (iwl_mvm_is_dqa_supported(mvm))
+			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+	}
+
 	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
 					ieee80211_vif_type_p2p(vif));
 }
......
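The second hunk in this file changes how the broadcast station's TFD queue mask
is built: outside DQA mode it is still derived from the per-MAC queue mask
(minus the multicast CAB queue for AP), while in DQA mode it starts empty and
only the dedicated probe-response queue is set for AP-type interfaces. Below is
a standalone sketch of that mask computation, not driver code; the CAB queue
number and the per-MAC mask are hypothetical, only queue 9 comes from the patch.

/*
 * Standalone sketch (not driver code) of the qmask logic above.  The CAB
 * queue number and the per-MAC queue mask are made up for the example;
 * only queue 9 is taken from the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BIT(n)			(1U << (n))
#define SKETCH_AP_PROBE_RESP_QUEUE	9
#define SKETCH_CAB_QUEUE		12	/* hypothetical */
#define SKETCH_MAC_QUEUES_MASK		(SKETCH_BIT(12) | SKETCH_BIT(13))	/* hypothetical */

static uint32_t sketch_bcast_sta_qmask(bool dqa_supported, bool is_ap)
{
	uint32_t qmask = 0;

	if (!dqa_supported)
		qmask = SKETCH_MAC_QUEUES_MASK;

	if (is_ap) {
		/* the multicast (CAB) queue is never part of the mask */
		qmask &= ~SKETCH_BIT(SKETCH_CAB_QUEUE);

		if (dqa_supported)
			qmask |= SKETCH_BIT(SKETCH_AP_PROBE_RESP_QUEUE);
	}

	return qmask;
}

int main(void)
{
	printf("non-DQA AP qmask: 0x%08x\n", sketch_bcast_sta_qmask(false, true));
	printf("DQA     AP qmask: 0x%08x\n", sketch_bcast_sta_qmask(true, true));
	return 0;
}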
@@ -475,6 +475,17 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 	return dev_cmd;
 }
 
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+				      struct ieee80211_tx_info *info, __le16 fc)
+{
+	if (iwl_mvm_is_dqa_supported(mvm) &&
+	    info->control.vif->type == NL80211_IFTYPE_AP &&
+	    ieee80211_is_probe_resp(fc))
+		return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+
+	return info->hw_queue;
+}
+
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -484,6 +495,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	struct iwl_tx_cmd *tx_cmd;
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	int queue;
 
 	memcpy(&info, skb->cb, sizeof(info));
 
@@ -508,6 +520,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	    info.control.vif->type == NL80211_IFTYPE_STATION)
 		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
+	queue = info.hw_queue;
+
 	/*
 	 * If the interface on which the frame is sent is the P2P_DEVICE
 	 * or an AP/GO interface use the broadcast station associated
@@ -523,10 +537,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			iwl_mvm_vif_from_mac80211(info.control.vif);
 
 		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-		    info.control.vif->type == NL80211_IFTYPE_AP)
+		    info.control.vif->type == NL80211_IFTYPE_AP) {
 			sta_id = mvmvif->bcast_sta.sta_id;
-		else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
-			 is_multicast_ether_addr(hdr->addr1)) {
+			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+							   hdr->frame_control);
+		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+			   is_multicast_ether_addr(hdr->addr1)) {
 			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
 
 			if (ap_sta_id != IWL_MVM_STATION_COUNT)
@@ -534,7 +550,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		}
 	}
 
-	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
+	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
 
 	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
 	if (!dev_cmd)
@@ -545,7 +561,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
-	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
+	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		return -1;
 	}
......
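The TX path change boils down to one routing decision: if the frame is a probe
response leaving an AP/GO interface in DQA mode, send it on the dedicated queue
#9; otherwise keep whatever mac80211 put in hw_queue. Below is a simplified
standalone sketch of that selection, not driver code; the vif/frame flags are
stand-ins for the real cfg80211/ieee80211 types.

/* Simplified sketch of the queue routing added to the non-STA TX path. */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_AP_PROBE_RESP_QUEUE	9

enum sketch_vif_type { SKETCH_VIF_STATION, SKETCH_VIF_AP };

static int sketch_get_ctrl_vif_queue(bool dqa_supported,
				     enum sketch_vif_type vif_type,
				     bool is_probe_resp, int hw_queue)
{
	if (dqa_supported && vif_type == SKETCH_VIF_AP && is_probe_resp)
		return SKETCH_AP_PROBE_RESP_QUEUE;

	return hw_queue;	/* fall back to what mac80211 picked */
}

int main(void)
{
	/* Probe response from a GO/SoftAP in DQA mode -> dedicated queue 9. */
	printf("%d\n", sketch_get_ctrl_vif_queue(true, SKETCH_VIF_AP, true, 2));

	/* Any other frame keeps the mac80211-assigned hw_queue. */
	printf("%d\n", sketch_get_ctrl_vif_queue(true, SKETCH_VIF_AP, false, 2));
	return 0;
}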