Commit 75d4bf1f authored by Felix Fietkau

mt76: dma: cache dma map address/len in struct mt76_queue_entry

Accessing them from uncached memory can be expensive, so it's cheaper to cache them.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 2fe1a5d6
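
For background, and not part of the commit itself: the patch applies a common driver pattern of mirroring DMA mapping information into cached, CPU-side bookkeeping so that the TX cleanup path never has to read descriptors back from uncached coherent memory. A minimal sketch of that pattern, using hypothetical demo_* names and a 32-bit DMA address as in this hardware, could look like this:

/*
 * Minimal sketch (hypothetical demo_* names, not mt76 code): keep a
 * CPU-side copy of the DMA address/length next to each ring entry so
 * the unmap path never reads the device descriptor, which lives in
 * uncached coherent memory.
 */
#include <linux/compiler.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc {		/* device-visible descriptor (uncached) */
	__le32 buf0;
	__le32 ctrl;
};

struct demo_entry {		/* host-side bookkeeping (cached) */
	u32 dma_addr;		/* 32-bit DMA address, as in this hardware */
	u16 dma_len;
};

/* Write the descriptor and mirror the mapping info into the cached entry. */
static void demo_add_buf(struct demo_desc *desc, struct demo_entry *e,
			 u32 addr, u16 len)
{
	e->dma_addr = addr;
	e->dma_len = len;

	WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(len));
}

/* Cleanup unmaps from the cached copy instead of re-reading the descriptor. */
static void demo_tx_cleanup(struct device *dev, struct demo_entry *e)
{
	dma_unmap_single(dev, e->dma_addr, e->dma_len, DMA_TO_DEVICE);
}

In the diff below, mt76_dma_add_buf() plays the role of demo_add_buf(), filling entry->dma_addr[]/dma_len[] while it writes each descriptor, and mt76_dma_tx_cleanup_idx() and mt76_dma_get_buf() consume the cached values instead of calling READ_ONCE() on desc->buf0/desc->ctrl.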
drivers/net/wireless/mediatek/mt76/dma.c

@@ -49,6 +49,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
 		 struct sk_buff *skb, void *txwi)
 {
+	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
 	u32 ctrl;
 	int i, idx = -1;
@@ -61,16 +62,27 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
 
+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+		entry = &q->entry[idx];
+
 		if (buf[0].skip_unmap)
-			q->entry[q->head].skip_buf0 = true;
-		q->entry[q->head].skip_buf1 = i == nbufs - 1;
+			entry->skip_buf0 = true;
+		entry->skip_buf1 = i == nbufs - 1;
+
+		entry->dma_addr[0] = buf[0].addr;
+		entry->dma_len[0] = buf[0].len;
 
 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 		if (i < nbufs - 1) {
+			entry->dma_addr[1] = buf[1].addr;
+			entry->dma_len[1] = buf[1].len;
 			buf1 = buf[1].addr;
 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
 			if (buf[1].skip_unmap)
-				q->entry[q->head].skip_buf1 = true;
+				entry->skip_buf1 = true;
 		}
 
 		if (i == nbufs - 1)
@@ -78,11 +90,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		else if (i == nbufs - 2)
 			ctrl |= MT_DMA_CTL_LAST_SEC1;
 
-		idx = q->head;
-		q->head = (q->head + 1) % q->ndesc;
-
-		desc = &q->desc[idx];
-
 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
 		WRITE_ONCE(desc->info, cpu_to_le32(info));
@@ -102,24 +109,14 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 			struct mt76_queue_entry *prev_e)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
-	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
-	u32 ctrl = le32_to_cpu(__ctrl);
 
-	if (!e->skip_buf0) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf0);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf0)
+		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
-	}
 
-	if (!e->skip_buf1) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf1);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf1)
+		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
-	}
 
 	if (e->txwi == DMA_DUMMY_DATA)
 		e->txwi = NULL;
@@ -207,7 +204,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	void *buf = e->buf;
 	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
 
-	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	buf_addr = e->dma_addr[0];
 	if (len) {
 		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);

drivers/net/wireless/mediatek/mt76/mt76.h

@@ -102,6 +102,8 @@ struct mt76_queue_entry {
 		struct urb *urb;
 		int buf_sz;
 	};
+	u32 dma_addr[2];
+	u16 dma_len[2];
 	u16 wcid;
 	bool skip_buf0:1;
 	bool skip_buf1:1;