Commit 3e5f374d authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: mt7663s: move rx processing in txrx wq

Move rx processing to mt76s_txrx_wq in order to minimize the time the
sdio bus stays locked during rx.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 974327a4
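
Note on the approach, for readers of the diff below: the change keeps the SDIO interrupt handler minimal and moves all rx processing into a work item on the driver's txrx workqueue, so the host is claimed only around the short register accesses. The following is a minimal sketch of that deferral pattern, not the driver's code: struct my_dev, my_rx_work, my_sdio_irq and the MY_INT_* registers are hypothetical, while the workqueue helpers and sdio_* accessors are the standard kernel APIs that the patch itself uses.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mmc/sdio_func.h>

/* Hypothetical device context; the real driver uses struct mt76_sdio. */
struct my_dev {
	struct sdio_func *func;
	struct workqueue_struct *txrx_wq;
	struct work_struct rx_work;
};

#define MY_INT_STATUS	0x10	/* hypothetical interrupt status register */
#define MY_INT_MASK	0x14	/* hypothetical interrupt enable register */

static void my_rx_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, rx_work);
	int err, nframes = 0;
	u32 isr;

	/* Claim the host only for the raw register accesses. */
	sdio_claim_host(dev->func);
	sdio_writel(dev->func, 0, MY_INT_MASK, NULL);	/* mask the irq */
	isr = sdio_readl(dev->func, MY_INT_STATUS, &err);
	sdio_release_host(dev->func);

	if (!err && isr) {
		/* Drain the rx queues here, outside the host lock,
		 * accumulating the number of received frames in nframes.
		 */
	}

	if (nframes) {
		/* More data may follow: reschedule instead of looping. */
		queue_work(dev->txrx_wq, &dev->rx_work);
		return;
	}

	/* Idle again: unmask the interrupt. */
	sdio_claim_host(dev->func);
	sdio_writel(dev->func, ~0, MY_INT_MASK, NULL);
	sdio_release_host(dev->func);
}

static void my_sdio_irq(struct sdio_func *func)
{
	struct my_dev *dev = sdio_get_drvdata(func);

	/* Keep the irq handler tiny: just defer to the workqueue. */
	queue_work(dev->txrx_wq, &dev->rx_work);
}

In the driver itself the work item is initialized at probe time with INIT_WORK(&mdev->sdio.rx_work, mt7663s_rx_work) and canceled in mt76s_stop_txrx, as the hunks below show.
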
@@ -451,6 +451,7 @@ struct mt76_sdio {
 	struct workqueue_struct *txrx_wq;
 	struct work_struct tx_work;
+	struct work_struct rx_work;
 	unsigned long state;
@@ -679,6 +679,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev);
 u32 mt7663s_read_pcr(struct mt7615_dev *dev);
 int mt7663s_mcu_init(struct mt7615_dev *dev);
 void mt7663s_tx_work(struct work_struct *work);
+void mt7663s_rx_work(struct work_struct *work);
 void mt7663s_sdio_irq(struct sdio_func *func);
 #endif
@@ -369,6 +369,7 @@ static int mt7663s_probe(struct sdio_func *func,
 		goto err_free;
 	INIT_WORK(&mdev->sdio.tx_work, mt7663s_tx_work);
+	INIT_WORK(&mdev->sdio.rx_work, mt7663s_rx_work);
 	ret = mt7663s_hw_init(dev, func);
 	if (ret)
@@ -19,9 +19,9 @@
 #include "sdio.h"
 #include "mac.h"
-static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data)
+static void mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
 {
-	struct mt76_sdio *sdio = &dev->mt76.sdio;
+	struct mt76_sdio *sdio = &dev->sdio;
 	mutex_lock(&sdio->sched.lock);
 	sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */
@@ -61,11 +61,11 @@ static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
 	return skb;
 }
-static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
+static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 				struct mt76s_intr *intr)
 {
-	struct mt76_queue *q = &dev->mt76.q_rx[qid];
-	struct mt76_sdio *sdio = &dev->mt76.sdio;
+	struct mt76_queue *q = &dev->q_rx[qid];
+	struct mt76_sdio *sdio = &dev->sdio;
 	int len = 0, err, i, order;
 	struct page *page;
 	u8 *buf;
@@ -86,9 +86,12 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
 	buf = page_address(page);
+	sdio_claim_host(sdio->func);
 	err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+	sdio_release_host(sdio->func);
 	if (err < 0) {
-		dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err);
+		dev_err(dev->dev, "sdio read data failed:%d\n", err);
 		__free_pages(page, order);
 		return err;
 	}
@@ -113,7 +116,7 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
 	q->queued += i;
 	spin_unlock_bh(&q->lock);
-	return err;
+	return i;
 }
 static int mt7663s_tx_update_sched(struct mt76_dev *dev,
@@ -212,39 +215,60 @@ void mt7663s_tx_work(struct work_struct *work)
 	wake_up_process(sdio->kthread);
 }
-void mt7663s_sdio_irq(struct sdio_func *func)
+void mt7663s_rx_work(struct work_struct *work)
 {
-	struct mt7615_dev *dev = sdio_get_drvdata(func);
-	struct mt76_sdio *sdio = &dev->mt76.sdio;
+	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, rx_work);
+	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
 	struct mt76s_intr intr;
+	int nframes = 0, ret;
 	/* disable interrupt */
-	sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0);
-	do {
-		sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr));
-		trace_dev_irq(&dev->mt76, intr.isr, 0);
+	sdio_claim_host(sdio->func);
+	sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0);
+	sdio_readsb(sdio->func, &intr, MCR_WHISR, sizeof(struct mt76s_intr));
+	sdio_release_host(sdio->func);
-		if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
-			goto out;
+	trace_dev_irq(dev, intr.isr, 0);
 	if (intr.isr & WHIER_RX0_DONE_INT_EN) {
-		mt7663s_rx_run_queue(dev, 0, &intr);
+		ret = mt7663s_rx_run_queue(dev, 0, &intr);
+		if (ret > 0) {
 			wake_up_process(sdio->kthread);
+			nframes += ret;
+		}
 	}
 	if (intr.isr & WHIER_RX1_DONE_INT_EN) {
-		mt7663s_rx_run_queue(dev, 1, &intr);
+		ret = mt7663s_rx_run_queue(dev, 1, &intr);
+		if (ret > 0) {
 			wake_up_process(sdio->kthread);
+			nframes += ret;
+		}
 	}
 	if (intr.isr & WHIER_TX_DONE_INT_EN) {
 		mt7663s_refill_sched_quota(dev, intr.tx.wtqcr);
 		queue_work(sdio->txrx_wq, &sdio->tx_work);
 		wake_up_process(sdio->kthread);
 	}
-	} while (intr.isr);
-out:
+	if (nframes) {
+		queue_work(sdio->txrx_wq, &sdio->rx_work);
+		return;
+	}
 	/* enable interrupt */
-	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0);
+	sdio_claim_host(sdio->func);
+	sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0);
+	sdio_release_host(sdio->func);
 }
+void mt7663s_sdio_irq(struct sdio_func *func)
+{
+	struct mt7615_dev *dev = sdio_get_drvdata(func);
+	struct mt76_sdio *sdio = &dev->mt76.sdio;
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
+		return;
+	queue_work(sdio->txrx_wq, &sdio->rx_work);
+}
@@ -69,6 +69,7 @@ void mt76s_stop_txrx(struct mt76_dev *dev)
 	struct mt76_sdio *sdio = &dev->sdio;
 	cancel_work_sync(&sdio->tx_work);
+	cancel_work_sync(&sdio->rx_work);
 	cancel_work_sync(&sdio->stat_work);
 	clear_bit(MT76_READING_STATS, &dev->phy.state);