Commit 6a618acb authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: sdio: convert {status/net}_work to mt76_worker

In order to improve driver throughput, convert status_work and net_work
to the mt76_worker API.
Remove the txrx_wq sdio workqueue since it is no longer used. A brief
usage sketch of the mt76_worker API follows the commit metadata below.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 1a3efbcc
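
For context, here is a minimal sketch of the mt76_worker pattern this patch converts to, assuming a driver that embeds a struct mt76_dev. The struct my_dev, my_process_rx() and the other my_* functions are hypothetical names used only for illustration; the mt76_worker_setup()/mt76_worker_schedule()/mt76_worker_teardown() and sched_set_fifo_low() calls are the same ones that appear in the diff below.

#include "mt76.h"	/* struct mt76_dev, struct mt76_worker, worker helpers */

struct my_dev {
	struct mt76_dev mt76;			/* embedded mt76 core device */
	struct mt76_worker net_worker;		/* was: work_struct + txrx_wq */
};

static int my_process_rx(struct my_dev *dev);	/* hypothetical RX handler */

/* Worker body: runs in the kthread created by mt76_worker_setup() */
static void my_net_worker_fn(struct mt76_worker *w)
{
	struct my_dev *dev = container_of(w, struct my_dev, net_worker);

	/* keep draining until no more frames are pending */
	while (my_process_rx(dev) > 0)
		;
}

static int my_probe(struct my_dev *dev)
{
	int err;

	/* replaces INIT_WORK() + alloc_workqueue(): spawns a kthread */
	err = mt76_worker_setup(dev->mt76.hw, &dev->net_worker,
				my_net_worker_fn, "sdio-net");
	if (err)
		return err;

	/* run the worker with low real-time (FIFO) priority */
	sched_set_fifo_low(dev->net_worker.task);
	return 0;
}

/* RX path: replaces queue_work(sdio->txrx_wq, &sdio->net_work) */
static void my_rx_done(struct my_dev *dev)
{
	mt76_worker_schedule(&dev->net_worker);
}

static void my_remove(struct my_dev *dev)
{
	/* replaces cancel_work_sync() + destroy_workqueue() */
	mt76_worker_teardown(&dev->net_worker);
}

The design point is that a dedicated kthread running at low FIFO priority picks up RX and status processing with less scheduling latency than an unbound workqueue item, which is presumably where the throughput improvement mentioned in the commit message comes from.
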
@@ -445,11 +445,10 @@ struct mt76_usb {
#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
struct mt76_sdio {
struct workqueue_struct *txrx_wq;
struct mt76_worker txrx_worker;
struct work_struct status_work;
struct work_struct net_work;
struct mt76_worker status_worker;
struct mt76_worker net_worker;
struct work_struct stat_work;
u8 *xmit_buf[MT_TXQ_MCU_WA];
@@ -1059,7 +1058,6 @@ void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_queues(struct mt76_dev *dev);
void mt76s_stop_txrx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
struct sk_buff *
@@ -366,11 +366,11 @@ static int mt7663s_probe(struct sdio_func *func,
ret = mt76s_init(mdev, func, &mt7663s_ops);
if (ret < 0)
goto err_free;
goto error;
ret = mt7663s_hw_init(dev, func);
if (ret)
goto err_deinit;
goto error;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
@@ -381,7 +381,7 @@ static int mt7663s_probe(struct sdio_func *func,
GFP_KERNEL);
if (!mdev->sdio.intr_data) {
ret = -ENOMEM;
goto err_deinit;
goto error;
}
for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
@@ -390,30 +390,29 @@ static int mt7663s_probe(struct sdio_func *func,
GFP_KERNEL);
if (!mdev->sdio.xmit_buf[i]) {
ret = -ENOMEM;
goto err_deinit;
goto error;
}
}
ret = mt76s_alloc_queues(&dev->mt76);
if (ret)
goto err_deinit;
goto error;
ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker,
mt7663s_txrx_worker, "sdio-txrx");
if (ret)
goto err_deinit;
goto error;
sched_set_fifo_low(mdev->sdio.txrx_worker.task);
ret = mt7663_usb_sdio_register_device(dev);
if (ret)
goto err_deinit;
goto error;
return 0;
err_deinit:
error:
mt76s_deinit(&dev->mt76);
err_free:
mt76_free_device(&dev->mt76);
return ret;
@@ -454,7 +453,13 @@ static int mt7663s_suspend(struct device *dev)
return err;
mt76_worker_disable(&mdev->mt76.sdio.txrx_worker);
mt76s_stop_txrx(&mdev->mt76);
mt76_worker_disable(&mdev->mt76.sdio.status_worker);
mt76_worker_disable(&mdev->mt76.sdio.net_worker);
cancel_work_sync(&mdev->mt76.sdio.stat_work);
clear_bit(MT76_READING_STATS, &mdev->mphy.state);
mt76_tx_status_check(&mdev->mt76, NULL, true);
return 0;
}
@@ -466,6 +471,8 @@ static int mt7663s_resume(struct device *dev)
int err;
mt76_worker_enable(&mdev->mt76.sdio.txrx_worker);
mt76_worker_enable(&mdev->mt76.sdio.status_worker);
mt76_worker_enable(&mdev->mt76.sdio.net_worker);
err = mt7615_mcu_set_drv_ctrl(mdev);
if (err)
@@ -150,7 +150,7 @@ static int mt7663s_rx_handler(struct mt76_dev *dev)
if (intr->isr & WHIER_RX0_DONE_INT_EN) {
ret = mt7663s_rx_run_queue(dev, 0, intr);
if (ret > 0) {
queue_work(sdio->txrx_wq, &sdio->net_work);
mt76_worker_schedule(&sdio->net_worker);
nframes += ret;
}
}
@@ -158,7 +158,7 @@ static int mt7663s_rx_handler(struct mt76_dev *dev)
if (intr->isr & WHIER_RX1_DONE_INT_EN) {
ret = mt7663s_rx_run_queue(dev, 1, intr);
if (ret > 0) {
queue_work(sdio->txrx_wq, &sdio->net_work);
mt76_worker_schedule(&sdio->net_worker);
nframes += ret;
}
}
@@ -269,7 +269,7 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
}
mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz);
queue_work(sdio->txrx_wq, &sdio->status_work);
mt76_worker_schedule(&sdio->status_worker);
return nframes;
}
@@ -62,19 +62,6 @@ static int mt76s_alloc_tx(struct mt76_dev *dev)
return 0;
}
void mt76s_stop_txrx(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
cancel_work_sync(&sdio->status_work);
cancel_work_sync(&sdio->net_work);
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76s_stop_txrx);
int mt76s_alloc_queues(struct mt76_dev *dev)
{
int err;
@@ -129,10 +116,32 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
return nframes;
}
static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
static void mt76s_net_worker(struct mt76_worker *w)
{
struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
net_worker);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes;
do {
nframes = 0;
local_bh_disable();
rcu_read_lock();
mt76_for_each_q_rx(dev, i)
nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
rcu_read_unlock();
local_bh_enable();
} while (nframes > 0);
}
static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
int nframes = 0;
bool wake;
while (q->queued > 0) {
@@ -148,6 +157,7 @@ static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
}
mt76_queue_tx_complete(dev, q, &entry);
nframes++;
}
wake = q->stopped && q->queued < q->ndesc - 8;
@@ -158,12 +168,32 @@ static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
wake_up(&dev->tx_wait);
if (qid == MT_TXQ_MCU)
return;
goto out;
mt76_txq_schedule(&dev->phy, qid);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
out:
return nframes;
}
static void mt76s_status_worker(struct mt76_worker *w)
{
struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
status_worker);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes;
do {
nframes = 0;
for (i = 0; i < MT_TXQ_MCU_WA; i++)
nframes += mt76s_process_tx_queue(dev, i);
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->sdio.stat_work);
} while (nframes > 0);
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -264,51 +294,19 @@ static const struct mt76_queue_ops sdio_queue_ops = {
.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
static void mt76s_tx_work(struct work_struct *work)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
status_work);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i;
for (i = 0; i < MT_TXQ_MCU_WA; i++)
mt76s_process_tx_queue(dev, i);
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->sdio.stat_work);
}
static void mt76s_rx_work(struct work_struct *work)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
net_work);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i;
/* rx processing */
local_bh_disable();
rcu_read_lock();
mt76_for_each_q_rx(dev, i)
mt76s_process_rx_queue(dev, &dev->q_rx[i]);
rcu_read_unlock();
local_bh_enable();
}
void mt76s_deinit(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
int i;
mt76_worker_teardown(&sdio->txrx_worker);
mt76_worker_teardown(&sdio->status_worker);
mt76_worker_teardown(&sdio->net_worker);
mt76s_stop_txrx(dev);
if (sdio->txrx_wq) {
destroy_workqueue(sdio->txrx_wq);
sdio->txrx_wq = NULL;
}
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
mt76_tx_status_check(dev, NULL, true);
sdio_claim_host(sdio->func);
sdio_release_irq(sdio->func);
@@ -335,16 +333,22 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
int err;
sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
WQ_UNBOUND | WQ_HIGHPRI,
WQ_UNBOUND_MAX_ACTIVE);
if (!sdio->txrx_wq)
return -ENOMEM;
err = mt76_worker_setup(dev->hw, &sdio->status_worker,
mt76s_status_worker, "sdio-status");
if (err)
return err;
err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
"sdio-net");
if (err)
return err;
sched_set_fifo_low(sdio->status_worker.task);
sched_set_fifo_low(sdio->net_worker.task);
INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
INIT_WORK(&sdio->status_work, mt76s_tx_work);
INIT_WORK(&sdio->net_work, mt76s_rx_work);
mutex_init(&sdio->sched.lock);
dev->queue_ops = &sdio_queue_ops;