Commit d0998eb8 authored by John Crispin's avatar John Crispin Committed by Kalle Valo

ath11k: optimise ath11k_dp_tx_completion_handler

The current code performs four memcpy operations for each completion frame:
1) duplicating the descriptor
2 + 3) inside the kfifo insertion
4) in the kfifo removal

This patch drops the kfifo and uses a trivial ring buffer instead. This
requires only a single memcpy on insertion. No removal step is needed, as
the inserted data can simply be processed in place. Because the code runs
inside the NAPI context, it executes atomically and most of the locking
is unnecessary.
Signed-off-by: default avatarJohn Crispin <john@phrozen.org>
Signed-off-by: default avatarKalle Valo <kvalo@codeaurora.org>
parent 8cfa7ef8
......@@ -3,7 +3,6 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/kfifo.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
......@@ -828,10 +827,7 @@ void ath11k_dp_free(struct ath11k_base *ab)
ath11k_dp_tx_pending_cleanup, ab);
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
spin_lock_bh(&dp->tx_ring[i].tx_status_lock);
kfifo_free(&dp->tx_ring[i].tx_status_fifo);
spin_unlock_bh(&dp->tx_ring[i].tx_status_lock);
kfree(dp->tx_ring[i].tx_status);
}
/* Deinit any SOC level resource */
......@@ -871,17 +867,17 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
if (ret)
goto fail_link_desc_cleanup;
size = roundup_pow_of_two(DP_TX_COMP_RING_SIZE);
size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
spin_lock_init(&dp->tx_ring[i].tx_status_lock);
ret = kfifo_alloc(&dp->tx_ring[i].tx_status_fifo, size,
GFP_KERNEL);
if (ret)
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status)
goto fail_cmn_srng_cleanup;
}
......
......@@ -6,7 +6,6 @@
#ifndef ATH11K_DP_H
#define ATH11K_DP_H
#include <linux/kfifo.h>
#include "hal_rx.h"
struct ath11k_base;
......@@ -58,6 +57,8 @@ struct dp_rxdma_ring {
int bufs_max;
};
#define ATH11K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
struct dp_tx_ring {
u8 tcl_data_ring_id;
struct dp_srng tcl_data_ring;
......@@ -65,11 +66,9 @@ struct dp_tx_ring {
struct idr txbuf_idr;
/* Protects txbuf_idr and num_pending */
spinlock_t tx_idr_lock;
DECLARE_KFIFO_PTR(tx_status_fifo, struct hal_wbm_release_ring);
/* lock to protect tx_status_fifo because tx_status_fifo can be
* accessed concurrently.
*/
spinlock_t tx_status_lock;
struct hal_wbm_release_ring *tx_status;
int tx_status_head;
int tx_status_tail;
};
struct ath11k_pdev_mon_stats {
......
......@@ -79,7 +79,6 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct dp_tx_ring *tx_ring;
u8 cached_desc[HAL_TCL_DESC_LEN];
void *hal_tcl_desc;
u8 pool_id;
u8 hal_ring_id;
......@@ -167,8 +166,6 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
skb_cb->vif = arvif->vif;
skb_cb->ar = ar;
ath11k_hal_tx_cmd_desc_setup(ab, cached_desc, &ti);
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tcl_ring = &ab->hal.srng_list[hal_ring_id];
......@@ -188,7 +185,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
goto fail_unmap_dma;
}
ath11k_hal_tx_desc_sync(cached_desc, hal_tcl_desc);
ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
sizeof(struct hal_tlv_hdr), &ti);
ath11k_hal_srng_access_end(ab, tcl_ring);
......@@ -432,47 +430,45 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct sk_buff *msdu;
struct hal_wbm_release_ring tx_status;
struct hal_tx_status ts;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
u32 *desc;
u32 msdu_id;
u8 mac_id;
spin_lock_bh(&status_ring->lock);
ath11k_hal_srng_access_begin(ab, status_ring);
spin_lock_bh(&tx_ring->tx_status_lock);
while (!kfifo_is_full(&tx_ring->tx_status_fifo) &&
while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
tx_ring->tx_status_tail) &&
(desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
ath11k_hal_tx_status_desc_sync((void *)desc,
(void *)&tx_status);
kfifo_put(&tx_ring->tx_status_fifo, tx_status);
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
desc, sizeof(struct hal_wbm_release_ring));
tx_ring->tx_status_head =
ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
kfifo_is_full(&tx_ring->tx_status_fifo)) {
(ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
spin_unlock_bh(&tx_ring->tx_status_lock);
ath11k_hal_srng_access_end(ab, status_ring);
spin_unlock_bh(&status_ring->lock);
spin_lock_bh(&tx_ring->tx_status_lock);
while (kfifo_get(&tx_ring->tx_status_fifo, &tx_status)) {
memset(&ts, 0, sizeof(ts));
ath11k_hal_tx_status_parse(ab, &tx_status, &ts);
while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
struct hal_wbm_release_ring *tx_status;
tx_ring->tx_status_tail =
ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
ath11k_hal_tx_status_parse(ab, tx_status, &ts);
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, ts.desc_id);
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ts.desc_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
ath11k_dp_tx_process_htt_tx_complete(ab,
(void *)&tx_status,
(void *)tx_status,
mac_id, msdu_id,
tx_ring);
continue;
......@@ -494,12 +490,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
/* TODO: Locking optimization so that tx_completion for an msdu
* is not called with tx_status_lock acquired
*/
ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
}
spin_unlock_bh(&tx_ring->tx_status_lock);
}
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
......
......@@ -74,19 +74,6 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
tcl_cmd->info4 = 0;
}
/* Commit a cached TCL data command descriptor to hardware.
 *
 * Copies the software-built descriptor (tx_desc_cached) into the hardware
 * descriptor slot (hw_desc), skipping past the TLV header that occupies the
 * start of the hardware slot. Only sizeof(struct hal_tcl_data_cmd) bytes are
 * copied; the TLV header itself is assumed to be written elsewhere.
 *
 * NOTE(review): arithmetic on a void * relies on the GCC extension used
 * throughout the kernel (void * advances by bytes).
 */
void ath11k_hal_tx_desc_sync(void *tx_desc_cached, void *hw_desc)
{
memcpy(hw_desc + sizeof(struct hal_tlv_hdr), tx_desc_cached,
sizeof(struct hal_tcl_data_cmd));
}
/* Get the descriptor status from hardware.
 *
 * Copies HAL_TX_STATUS_DESC_LEN bytes of the hardware completion descriptor
 * (hw_desc) into a local buffer (local_desc) so it can be parsed after the
 * ring entry is released back to hardware. The caller must provide a
 * local_desc buffer of at least HAL_TX_STATUS_DESC_LEN bytes.
 */
void ath11k_hal_tx_status_desc_sync(void *hw_desc, void *local_desc)
{
memcpy(local_desc, hw_desc, HAL_TX_STATUS_DESC_LEN);
}
void ath11k_hal_tx_status_parse(struct ath11k_base *ab,
struct hal_wbm_release_ring *desc,
struct hal_tx_status *ts)
......
......@@ -61,11 +61,9 @@ struct hal_tx_status {
void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
struct hal_tx_info *ti);
void ath11k_hal_tx_desc_sync(void *tx_desc_cached, void *hw_desc);
void ath11k_hal_tx_status_parse(struct ath11k_base *ab,
struct hal_wbm_release_ring *desc,
struct hal_tx_status *ts);
void ath11k_hal_tx_status_desc_sync(void *hw_desc, void *local_desc);
void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
enum hal_reo_cmd_type type,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment