Commit b0578865 authored by Felix Fietkau, committed by Kalle Valo

ath10k: do not use coherent memory for allocated device memory chunks

Coherent memory is more expensive to allocate (and constrained on some
architectures where it has to be pre-allocated). It is also completely
unnecessary, since the host has no reason to even access these allocated
memory spaces.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 81ec3c09
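In short, the patch replaces coherent DMA allocations with ordinary kernel memory plus a streaming DMA mapping. A minimal standalone sketch of that pattern follows; the helper names and the dev/len parameters are hypothetical illustration, not ath10k code:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Allocate a buffer the device will read but the host never touches again:
 * normal kernel memory plus a one-shot streaming mapping, instead of
 * dma_alloc_coherent(). Hypothetical helper, for illustration only. */
static void *alloc_device_chunk(struct device *dev, size_t len, dma_addr_t *paddr)
{
	void *vaddr = kzalloc(len, GFP_KERNEL);

	if (!vaddr)
		return NULL;

	/* Map for device reads only (DMA_TO_DEVICE). */
	*paddr = dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *paddr)) {
		kfree(vaddr);
		return NULL;
	}

	return vaddr;
}

/* Tear-down mirrors the allocation: unmap, then free the kernel memory. */
static void free_device_chunk(struct device *dev, void *vaddr, size_t len,
			      dma_addr_t paddr)
{
	dma_unmap_single(dev, paddr, len, DMA_TO_DEVICE);
	kfree(vaddr);
}

The diff below applies this pattern to the WMI host memory chunks and, in addition, retries with progressively halved chunk sizes when a large allocation fails.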
@@ -4312,34 +4312,58 @@ void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
 }
 
-static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
-				     u32 num_units, u32 unit_len)
+static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
+				  u32 num_units, u32 unit_len)
 {
 	dma_addr_t paddr;
-	u32 pool_size;
+	u32 pool_size = 0;
 	int idx = ar->wmi.num_mem_chunks;
+	void *vaddr = NULL;
 
-	pool_size = num_units * round_up(unit_len, 4);
+	if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks))
+		return -ENOMEM;
 
-	if (!pool_size)
-		return -EINVAL;
+	while (!vaddr && num_units) {
+		pool_size = num_units * round_up(unit_len, 4);
+		if (!pool_size)
+			return -EINVAL;
 
-	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
-							   pool_size,
-							   &paddr,
-							   GFP_KERNEL);
-	if (!ar->wmi.mem_chunks[idx].vaddr) {
-		ath10k_warn(ar, "failed to allocate memory chunk\n");
-		return -ENOMEM;
+		vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN);
+		if (!vaddr)
+			num_units /= 2;
 	}
 
-	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+	if (!num_units)
+		return -ENOMEM;
+
+	paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ar->dev, paddr)) {
+		kfree(vaddr);
+		return -ENOMEM;
+	}
 
+	ar->wmi.mem_chunks[idx].vaddr = vaddr;
 	ar->wmi.mem_chunks[idx].paddr = paddr;
 	ar->wmi.mem_chunks[idx].len = pool_size;
 	ar->wmi.mem_chunks[idx].req_id = req_id;
 	ar->wmi.num_mem_chunks++;
 
+	return num_units;
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+				     u32 num_units, u32 unit_len)
+{
+	int ret;
+
+	while (num_units) {
+		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
+		if (ret < 0)
+			return ret;
+
+		num_units -= ret;
+	}
+
 	return 0;
 }
@@ -7717,10 +7741,11 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
 
 	/* free the host memory chunks requested by firmware */
 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
-		dma_free_coherent(ar->dev,
-				  ar->wmi.mem_chunks[i].len,
-				  ar->wmi.mem_chunks[i].vaddr,
-				  ar->wmi.mem_chunks[i].paddr);
+		dma_unmap_single(ar->dev,
+				 ar->wmi.mem_chunks[i].paddr,
+				 ar->wmi.mem_chunks[i].len,
+				 DMA_TO_DEVICE);
+		kfree(ar->wmi.mem_chunks[i].vaddr);
 	}
 
 	ar->wmi.num_mem_chunks = 0;