Commit 3128b3d8 authored by Rajkumar Manoharan, committed by Kalle Valo

ath10k: speedup htt rx descriptor processing for rx_ind

In a follow-up patch, htt rx descriptors will be reused instead of
being deallocated and refilled. To achieve that, htt rx indication
messages should not be deferred; they are processed in the pci
tasklet itself. Only mpdu_count is used from the rx indication
message, so it is maintained as an atomic variable and all rx amsdu
handling is done from the txrx tasklet. This change gets rid of the
rx_compl_q usage.
Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 18235664
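
The interesting part of the diff below is a producer/consumer handoff: the pci tasklet (producer) no longer queues rx indication skbs but only adds to an atomic MPDU counter, and the txrx tasklet (consumer) claims a batch by reading the counter and subtracting what it read. A minimal user-space sketch of that pattern follows, using C11 atomics in place of the kernel's atomic_t; the names and the simulated work are illustrative, not ath10k code:

/* sketch.c - model of the atomic handoff introduced by this patch;
 * compile with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_mpdus_ready;	/* stands in for htt->num_mpdus_ready */

/* Producer, i.e. the rx_ind path run from the pci tasklet: account
 * for the MPDUs and kick the consumer instead of queueing the skb. */
static void rx_proc_rx_ind(int mpdu_count)
{
	atomic_fetch_add(&num_mpdus_ready, mpdu_count);
	/* kernel: tasklet_schedule(&htt->txrx_compl_task); */
}

/* Illustrative stand-in for ath10k_htt_rx_handle_amsdu(); returns
 * nonzero on error so the batch loop can bail out early. */
static int handle_amsdu(void)
{
	printf("processing one MPDU\n");
	return 0;
}

/* Consumer, i.e. the txrx completion tasklet: claim the current
 * batch by reading the counter and subtracting exactly what was
 * read. */
static void txrx_compl_task(void)
{
	int num_mpdus = atomic_load(&num_mpdus_ready);

	atomic_fetch_sub(&num_mpdus_ready, num_mpdus);

	while (num_mpdus--) {
		if (handle_amsdu())
			break;
	}
	/* kernel: tasklet_schedule(&htt->rx_replenish_task); */
}

int main(void)
{
	rx_proc_rx_ind(3);	/* rx indication covering 3 MPDUs */
	rx_proc_rx_ind(1);	/* rx fragment: atomic_inc() in the patch */
	txrx_compl_task();	/* drains all 4 MPDUs */
	return 0;
}

Because the consumer subtracts only the count it read, MPDUs accounted by a concurrent producer between the read and the subtract are preserved for the next scheduling of the tasklet.
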
...@@ -1663,6 +1663,7 @@ struct ath10k_htt { ...@@ -1663,6 +1663,7 @@ struct ath10k_htt {
* used to avoid further failures */ * used to avoid further failures */
bool rx_confused; bool rx_confused;
struct tasklet_struct rx_replenish_task; struct tasklet_struct rx_replenish_task;
atomic_t num_mpdus_ready;
/* This is used to group tx/rx completions separately and process them /* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */ * in batches to reduce cache stalls */
......
...@@ -526,6 +526,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) ...@@ -526,6 +526,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_compl_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q); skb_queue_head_init(&htt->rx_in_ord_compl_q);
skb_queue_head_init(&htt->tx_fetch_ind_q); skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt); (unsigned long)htt);
...@@ -1564,8 +1565,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) ...@@ -1564,8 +1565,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return 0; return 0;
} }
static void ath10k_htt_rx_handler(struct ath10k_htt *htt, static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
struct htt_rx_indication *rx) struct htt_rx_indication *rx)
{ {
struct ath10k *ar = htt->ar; struct ath10k *ar = htt->ar;
struct htt_rx_indication_mpdu_range *mpdu_ranges; struct htt_rx_indication_mpdu_range *mpdu_ranges;
...@@ -1584,19 +1585,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, ...@@ -1584,19 +1585,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
for (i = 0; i < num_mpdu_ranges; i++) for (i = 0; i < num_mpdu_ranges; i++)
mpdu_count += mpdu_ranges[i].mpdu_count; mpdu_count += mpdu_ranges[i].mpdu_count;
while (mpdu_count--) { atomic_add(mpdu_count, &htt->num_mpdus_ready);
if (ath10k_htt_rx_handle_amsdu(htt) < 0)
break;
}
tasklet_schedule(&htt->rx_replenish_task); tasklet_schedule(&htt->txrx_compl_task);
} }
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt) static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{ {
ath10k_htt_rx_handle_amsdu(htt); atomic_inc(&htt->num_mpdus_ready);
tasklet_schedule(&htt->rx_replenish_task); tasklet_schedule(&htt->txrx_compl_task);
} }
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
...@@ -2250,9 +2248,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ...@@ -2250,9 +2248,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break; break;
} }
case HTT_T2H_MSG_TYPE_RX_IND: case HTT_T2H_MSG_TYPE_RX_IND:
skb_queue_tail(&htt->rx_compl_q, skb); ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
tasklet_schedule(&htt->txrx_compl_task); break;
return;
case HTT_T2H_MSG_TYPE_PEER_MAP: { case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = { struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id, .vdev_id = resp->peer_map.vdev_id,
...@@ -2419,18 +2416,14 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) ...@@ -2419,18 +2416,14 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
struct sk_buff_head rx_q; struct sk_buff_head rx_q;
struct sk_buff_head rx_ind_q; struct sk_buff_head rx_ind_q;
struct sk_buff_head tx_ind_q; struct sk_buff_head tx_ind_q;
struct htt_resp *resp;
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags; unsigned long flags;
int num_mpdus;
__skb_queue_head_init(&rx_q); __skb_queue_head_init(&rx_q);
__skb_queue_head_init(&rx_ind_q); __skb_queue_head_init(&rx_ind_q);
__skb_queue_head_init(&tx_ind_q); __skb_queue_head_init(&tx_ind_q);
spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags); spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
...@@ -2454,10 +2447,12 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) ...@@ -2454,10 +2447,12 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
ath10k_mac_tx_push_pending(ar); ath10k_mac_tx_push_pending(ar);
while ((skb = __skb_dequeue(&rx_q))) { num_mpdus = atomic_read(&htt->num_mpdus_ready);
resp = (struct htt_resp *)skb->data; atomic_sub(num_mpdus, &htt->num_mpdus_ready);
ath10k_htt_rx_handler(htt, &resp->rx_ind);
dev_kfree_skb_any(skb); while (num_mpdus--) {
if (ath10k_htt_rx_handle_amsdu(htt))
break;
} }
while ((skb = __skb_dequeue(&rx_ind_q))) { while ((skb = __skb_dequeue(&rx_ind_q))) {
...@@ -2466,4 +2461,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) ...@@ -2466,4 +2461,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
spin_unlock_bh(&htt->rx_ring.lock); spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
tasklet_schedule(&htt->rx_replenish_task);
} }