Commit e0ad8002 authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: mt7663s: move tx/rx processing in the same txrx workqueue

Move mt7663 tx and rx processing into the same workqueue in order to
reduce the jitter that can hurt TCP performance. This is a preliminary
patch for switching to the mt76 worker APIs.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent ced050ae
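
To make the resulting flow easier to follow, here is a minimal sketch of the scheduling pattern this patch switches to; it is illustration only, not the driver code, and my_dev, my_flush_tx() and my_drain_rx() are hypothetical placeholders. A single work item drains the tx queues, then polls rx, reschedules itself while frames are still pending, and finally kicks the status work on the same workqueue, so the tx and rx paths no longer interleave on separate work items and add jitter.

/*
 * Illustrative sketch only -- my_dev, my_flush_tx() and my_drain_rx()
 * are made-up placeholders, not mt76 driver symbols.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *txrx_wq;	/* shared tx/rx workqueue */
	struct work_struct txrx_work;		/* single tx+rx work item */
	struct work_struct status_work;		/* tx-status follow-up work */
};

/* stand-ins for the bus I/O: report how many frames they processed */
static int my_flush_tx(struct my_dev *dev)
{
	return 0;	/* real driver: push queued tx frames over SDIO */
}

static int my_drain_rx(struct my_dev *dev)
{
	return 0;	/* real driver: read rx frames, refill tx quota */
}

static void my_txrx_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, txrx_work);
	int nframes;

	/* tx and rx now run back to back in the same work item */
	nframes = my_flush_tx(dev);
	nframes += my_drain_rx(dev);

	/* frames still pending: schedule another tx/rx pass */
	if (nframes)
		queue_work(dev->txrx_wq, &dev->txrx_work);

	/* tx status processing runs on the same workqueue afterwards */
	queue_work(dev->txrx_wq, &dev->status_work);
}
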
@@ -446,15 +446,10 @@ struct mt76_usb {
 #define MT76S_XMIT_BUF_SZ	(16 * PAGE_SIZE)
 struct mt76_sdio {
 	struct workqueue_struct *txrx_wq;
-	struct {
-		struct work_struct xmit_work;
-		struct work_struct status_work;
-	} tx;
-	struct {
-		struct work_struct recv_work;
-		struct work_struct net_work;
-	} rx;
+	struct work_struct txrx_work;
+	struct work_struct status_work;
+	struct work_struct net_work;
 
 	struct work_struct stat_work;
 
 	u8 *xmit_buf[MT_TXQ_MCU_WA];
......
@@ -681,7 +681,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev);
 /* sdio */
 u32 mt7663s_read_pcr(struct mt7615_dev *dev);
 int mt7663s_mcu_init(struct mt7615_dev *dev);
-void mt7663s_tx_work(struct work_struct *work);
+void mt7663s_txrx_work(struct work_struct *work);
 void mt7663s_rx_work(struct work_struct *work);
 void mt7663s_sdio_irq(struct sdio_func *func);
......
@@ -368,8 +368,7 @@ static int mt7663s_probe(struct sdio_func *func,
 	if (ret < 0)
 		goto err_free;
 
-	INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work);
-	INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work);
+	INIT_WORK(&mdev->sdio.txrx_work, mt7663s_txrx_work);
 
 	ret = mt7663s_hw_init(dev, func);
 	if (ret)
......
@@ -138,6 +138,50 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 	return i;
 }
 
+static int mt7663s_rx_handler(struct mt76_dev *dev)
+{
+	struct mt76_sdio *sdio = &dev->sdio;
+	struct mt76s_intr *intr = sdio->intr_data;
+	int nframes = 0, ret;
+
+	/* disable interrupt */
+	sdio_claim_host(sdio->func);
+	sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+	ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
+	sdio_release_host(sdio->func);
+
+	if (ret < 0)
+		goto out;
+
+	trace_dev_irq(dev, intr->isr, 0);
+
+	if (intr->isr & WHIER_RX0_DONE_INT_EN) {
+		ret = mt7663s_rx_run_queue(dev, 0, intr);
+		if (ret > 0) {
+			queue_work(sdio->txrx_wq, &sdio->net_work);
+			nframes += ret;
+		}
+	}
+
+	if (intr->isr & WHIER_RX1_DONE_INT_EN) {
+		ret = mt7663s_rx_run_queue(dev, 1, intr);
+		if (ret > 0) {
+			queue_work(sdio->txrx_wq, &sdio->net_work);
+			nframes += ret;
+		}
+	}
+
+	nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr);
+
+out:
+	/* enable interrupt */
+	sdio_claim_host(sdio->func);
+	sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
+	sdio_release_host(sdio->func);
+
+	return nframes;
+}
+
 static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
 				 int buf_sz, int *pse_size, int *ple_size)
 {
@@ -245,13 +289,14 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
 	return nframes;
 }
 
-void mt7663s_tx_work(struct work_struct *work)
+void mt7663s_txrx_work(struct work_struct *work)
 {
 	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
-					      tx.xmit_work);
+					      txrx_work);
 	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
 	int i, nframes = 0;
 
+	/* tx */
 	for (i = 0; i < MT_TXQ_MCU_WA; i++) {
 		int ret;
@@ -261,59 +306,14 @@ void mt7663s_tx_work(struct work_struct *work)
 		nframes += ret;
 	}
 
-	if (nframes)
-		queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
-
-	queue_work(sdio->txrx_wq, &sdio->tx.status_work);
-}
-
-void mt7663s_rx_work(struct work_struct *work)
-{
-	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
-					      rx.recv_work);
-	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
-	struct mt76s_intr *intr = sdio->intr_data;
-	int nframes = 0, ret;
-
-	/* disable interrupt */
-	sdio_claim_host(sdio->func);
-	sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
-	ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
-	sdio_release_host(sdio->func);
-
-	if (ret < 0)
-		goto out;
-
-	trace_dev_irq(dev, intr->isr, 0);
-
-	if (intr->isr & WHIER_RX0_DONE_INT_EN) {
-		ret = mt7663s_rx_run_queue(dev, 0, intr);
-		if (ret > 0) {
-			queue_work(sdio->txrx_wq, &sdio->rx.net_work);
-			nframes += ret;
-		}
-	}
-
-	if (intr->isr & WHIER_RX1_DONE_INT_EN) {
-		ret = mt7663s_rx_run_queue(dev, 1, intr);
-		if (ret > 0) {
-			queue_work(sdio->txrx_wq, &sdio->rx.net_work);
-			nframes += ret;
-		}
-	}
+	/* rx */
+	nframes += mt7663s_rx_handler(dev);
 
-	if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr))
-		queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+	if (nframes)
+		queue_work(sdio->txrx_wq, &sdio->txrx_work);
 
-	if (nframes) {
-		queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
-		return;
-	}
-
-out:
-	/* enable interrupt */
-	sdio_claim_host(sdio->func);
-	sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
-	sdio_release_host(sdio->func);
+	queue_work(sdio->txrx_wq, &sdio->status_work);
 }
 
 void mt7663s_sdio_irq(struct sdio_func *func)
@@ -324,5 +324,5 @@ void mt7663s_sdio_irq(struct sdio_func *func)
 	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
 		return;
 
-	queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
+	queue_work(sdio->txrx_wq, &sdio->txrx_work);
 }
@@ -66,10 +66,9 @@ void mt76s_stop_txrx(struct mt76_dev *dev)
 {
 	struct mt76_sdio *sdio = &dev->sdio;
 
-	cancel_work_sync(&sdio->tx.xmit_work);
-	cancel_work_sync(&sdio->tx.status_work);
-	cancel_work_sync(&sdio->rx.recv_work);
-	cancel_work_sync(&sdio->rx.net_work);
+	cancel_work_sync(&sdio->txrx_work);
+	cancel_work_sync(&sdio->status_work);
+	cancel_work_sync(&sdio->net_work);
 
 	cancel_work_sync(&sdio->stat_work);
 	clear_bit(MT76_READING_STATS, &dev->phy.state);
@@ -257,7 +256,7 @@ static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	struct mt76_sdio *sdio = &dev->sdio;
 
-	queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+	queue_work(sdio->txrx_wq, &sdio->txrx_work);
 }
 
 static const struct mt76_queue_ops sdio_queue_ops = {
@@ -269,7 +268,7 @@ static const struct mt76_queue_ops sdio_queue_ops = {
 static void mt76s_tx_work(struct work_struct *work)
 {
 	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
-					      tx.status_work);
+					      status_work);
 	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
 	int i;
@@ -284,7 +283,7 @@ static void mt76s_tx_work(struct work_struct *work)
 static void mt76s_rx_work(struct work_struct *work)
 {
 	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
-					      rx.net_work);
+					      net_work);
 	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
 	int i;
@@ -343,8 +342,8 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
 		return -ENOMEM;
 
 	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
-	INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
-	INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);
+	INIT_WORK(&sdio->status_work, mt76s_tx_work);
+	INIT_WORK(&sdio->net_work, mt76s_rx_work);
 
 	mutex_init(&sdio->sched.lock);
 	dev->queue_ops = &sdio_queue_ops;
......