Commit 974327a4 authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: mt76s: move tx processing in a dedicated wq

Introduce the mt76s_txrx_wq workqueue and move tx processing from a kthread to
a dedicated work item. This is a preliminary patch to improve mt7663s throughput.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 72372f3a
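For readers unfamiliar with the pattern this patch adopts, the sketch below shows the kthread-to-workqueue conversion in isolation: a driver-private struct carries a dedicated workqueue plus a work item, the tx path kicks it with queue_work() instead of wake_up_process(), and teardown cancels the work before destroying the queue. This is a minimal standalone module, not code from mt76; all names (toy_dev, toy_tx_work, toy_txrx_wq) are illustrative only.

/*
 * Standalone sketch of the workqueue pattern adopted by this patch.
 * Names are hypothetical; only the generic workqueue API is real.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

struct toy_dev {
        struct workqueue_struct *txrx_wq;       /* dedicated, high-priority wq */
        struct work_struct tx_work;             /* replaces the old tx kthread */
};

static struct toy_dev tdev;

static void toy_tx_work(struct work_struct *work)
{
        struct toy_dev *dev = container_of(work, struct toy_dev, tx_work);

        /* drain tx queues here; requeue ourselves if more frames are pending */
        (void)dev;
}

static int __init toy_init(void)
{
        tdev.txrx_wq = alloc_workqueue("toy_txrx_wq",
                                       WQ_UNBOUND | WQ_HIGHPRI,
                                       WQ_UNBOUND_MAX_ACTIVE);
        if (!tdev.txrx_wq)
                return -ENOMEM;

        INIT_WORK(&tdev.tx_work, toy_tx_work);
        /* a tx-kick path now does queue_work() instead of waking a kthread */
        queue_work(tdev.txrx_wq, &tdev.tx_work);
        return 0;
}

static void __exit toy_exit(void)
{
        cancel_work_sync(&tdev.tx_work);
        destroy_workqueue(tdev.txrx_wq);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

An unbound, high-priority workqueue behaves much like the per-driver tx thread it replaces, while scheduling and lifetime management come for free from the workqueue core.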
@@ -446,10 +446,12 @@ struct mt76_usb {
 };
 
 struct mt76_sdio {
-        struct task_struct *tx_kthread;
         struct task_struct *kthread;
         struct work_struct stat_work;
 
+        struct workqueue_struct *txrx_wq;
+        struct work_struct tx_work;
+
         unsigned long state;
 
         struct sdio_func *func;
@@ -678,7 +678,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev);
 /* sdio */
 u32 mt7663s_read_pcr(struct mt7615_dev *dev);
 int mt7663s_mcu_init(struct mt7615_dev *dev);
-int mt7663s_kthread_run(void *data);
+void mt7663s_tx_work(struct work_struct *work);
 void mt7663s_sdio_irq(struct sdio_func *func);
 
 #endif
@@ -364,18 +364,15 @@ static int mt7663s_probe(struct sdio_func *func,
         dev->ops = ops;
         sdio_set_drvdata(func, dev);
 
-        mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev,
-                                               "mt7663s_tx");
-        if (IS_ERR(mdev->sdio.tx_kthread))
-                return PTR_ERR(mdev->sdio.tx_kthread);
-
         ret = mt76s_init(mdev, func, &mt7663s_ops);
         if (ret < 0)
                 goto err_free;
 
+        INIT_WORK(&mdev->sdio.tx_work, mt7663s_tx_work);
+
         ret = mt7663s_hw_init(dev, func);
         if (ret)
-                goto err_free;
+                goto err_deinit;
 
         mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
                     (mt76_rr(dev, MT_HW_REV) & 0xff);
@@ -116,12 +116,12 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
         return err;
 }
 
-static int mt7663s_tx_update_sched(struct mt7615_dev *dev,
+static int mt7663s_tx_update_sched(struct mt76_dev *dev,
                                    struct mt76_queue_entry *e,
                                    bool mcu)
 {
-        struct mt76_sdio *sdio = &dev->mt76.sdio;
-        struct mt76_phy *mphy = &dev->mt76.phy;
+        struct mt76_sdio *sdio = &dev->sdio;
+        struct mt76_phy *mphy = &dev->phy;
         struct ieee80211_hdr *hdr;
         int size, ret = -EBUSY;
@@ -157,10 +157,10 @@ static int mt7663s_tx_update_sched(struct mt7615_dev *dev,
         return ret;
 }
 
-static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
+static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
-        bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q;
-        struct mt76_sdio *sdio = &dev->mt76.sdio;
+        bool mcu = q == dev->q_tx[MT_TXQ_MCU].q;
+        struct mt76_sdio *sdio = &dev->sdio;
         int nframes = 0;
 
         while (q->first != q->tail) {
@@ -174,9 +174,12 @@ static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
                 len = roundup(len, sdio->func->cur_blksize);
 
                 /* TODO: skb_walk_frags and then write to SDIO port */
+                sdio_claim_host(sdio->func);
                 err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len);
+                sdio_release_host(sdio->func);
                 if (err) {
-                        dev_err(dev->mt76.dev, "sdio write failed: %d\n", err);
+                        dev_err(dev->dev, "sdio write failed: %d\n", err);
                         return -EIO;
                 }
@@ -188,46 +191,25 @@ static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
         return nframes;
 }
 
-static int mt7663s_tx_run_queues(struct mt7615_dev *dev)
+void mt7663s_tx_work(struct work_struct *work)
 {
+        struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, tx_work);
+        struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
         int i, nframes = 0;
 
         for (i = 0; i < MT_TXQ_MCU_WA; i++) {
                 int ret;
 
-                ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q);
+                ret = mt7663s_tx_run_queue(dev, dev->q_tx[i].q);
                 if (ret < 0)
-                        return ret;
+                        break;
 
                 nframes += ret;
         }
+        if (nframes)
+                queue_work(sdio->txrx_wq, &sdio->tx_work);
 
-        return nframes;
-}
-
-int mt7663s_kthread_run(void *data)
-{
-        struct mt7615_dev *dev = data;
-        struct mt76_phy *mphy = &dev->mt76.phy;
-
-        while (!kthread_should_stop()) {
-                int ret;
-
-                cond_resched();
-
-                sdio_claim_host(dev->mt76.sdio.func);
-                ret = mt7663s_tx_run_queues(dev);
-                sdio_release_host(dev->mt76.sdio.func);
-
-                if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
-                        set_current_state(TASK_INTERRUPTIBLE);
-                        schedule();
-                } else {
-                        wake_up_process(dev->mt76.sdio.kthread);
-                }
-        }
-
-        return 0;
+        wake_up_process(sdio->kthread);
 }
 
 void mt7663s_sdio_irq(struct sdio_func *func)
@@ -258,7 +240,7 @@ void mt7663s_sdio_irq(struct sdio_func *func)
 
                 if (intr.isr & WHIER_TX_DONE_INT_EN) {
                         mt7663s_refill_sched_quota(dev, intr.tx.wtqcr);
-                        mt7663s_tx_run_queues(dev);
+                        queue_work(sdio->txrx_wq, &sdio->tx_work);
                         wake_up_process(sdio->kthread);
                 }
         } while (intr.isr);
@@ -68,6 +68,7 @@ void mt76s_stop_txrx(struct mt76_dev *dev)
 {
         struct mt76_sdio *sdio = &dev->sdio;
 
+        cancel_work_sync(&sdio->tx_work);
         cancel_work_sync(&sdio->stat_work);
         clear_bit(MT76_READING_STATS, &dev->phy.state);
@@ -179,7 +180,6 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
         if (wake)
                 ieee80211_wake_queue(dev->hw, qid);
 
-        wake_up_process(dev->sdio.tx_kthread);
 out:
         return n_dequeued;
 }
@@ -272,7 +272,7 @@ static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
 {
         struct mt76_sdio *sdio = &dev->sdio;
 
-        wake_up_process(sdio->tx_kthread);
+        queue_work(sdio->txrx_wq, &sdio->tx_work);
 }
 
 static const struct mt76_queue_ops sdio_queue_ops = {
@@ -324,9 +324,13 @@ void mt76s_deinit(struct mt76_dev *dev)
         int i;
 
         kthread_stop(sdio->kthread);
-        kthread_stop(sdio->tx_kthread);
         mt76s_stop_txrx(dev);
+        if (sdio->txrx_wq) {
+                destroy_workqueue(sdio->txrx_wq);
+                sdio->txrx_wq = NULL;
+        }
 
         sdio_claim_host(sdio->func);
         sdio_release_irq(sdio->func);
         sdio_release_host(sdio->func);
@@ -353,6 +357,12 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
 {
         struct mt76_sdio *sdio = &dev->sdio;
 
+        sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
+                                        WQ_UNBOUND | WQ_HIGHPRI,
+                                        WQ_UNBOUND_MAX_ACTIVE);
+        if (!sdio->txrx_wq)
+                return -ENOMEM;
+
         sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
         if (IS_ERR(sdio->kthread))
                 return PTR_ERR(sdio->kthread);