Commit c880a514 authored by Takashi Iwai

ALSA: memalloc: Use proper DMA mapping API for x86 WC buffer allocations

The x86 WC page allocation incorrectly assumes that the DMA address
can be taken directly from the page.  It also checks the DMA ops
inappropriately when deciding whether to switch to its own method.

This patch rewrites the code to use the proper DMA mapping API
instead.

Link: https://patch.msgid.link/20240912155227.4078-2-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent a51c925c
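For context, the fix replaces the assumption that a page's address is directly usable as a device address with the standard streaming DMA mapping pattern: map the freshly allocated buffer with dma_map_single() and check the result before use. Below is a minimal sketch of that pattern (the helper names wc_buf_alloc/wc_buf_free are illustrative, not part of the patch):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Illustrative only: allocate pages and map them for streaming DMA
     * instead of assuming the page address is a valid device address.
     */
    static void *wc_buf_alloc(struct device *dev, size_t size, dma_addr_t *addr)
    {
            void *p = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

            if (!p)
                    return NULL;
            *addr = dma_map_single(dev, p, size, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *addr)) {
                    /* mapping can fail, e.g. when an IOMMU rejects it */
                    free_pages((unsigned long)p, get_order(size));
                    return NULL;
            }
            return p;
    }

    static void wc_buf_free(struct device *dev, void *p, size_t size,
                            dma_addr_t addr)
    {
            dma_unmap_single(dev, addr, size, DMA_BIDIRECTIONAL);
            free_pages((unsigned long)p, get_order(size));
    }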
@@ -496,41 +496,54 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
 /*
  * Write-combined pages
  */
-/* x86-specific allocations */
 #ifdef CONFIG_SND_DMA_SGBUF
-#define x86_fallback(dmab)        (!get_dma_ops(dmab->dev.dev))
-#else
-#define x86_fallback(dmab)        false
-#endif
-
+/* x86-specific allocations */
 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-        if (x86_fallback(dmab))
-                return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
+        void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
+
+        if (!p)
+                return NULL;
+        dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
+        if (dmab->addr == DMA_MAPPING_ERROR) {
+                do_free_pages(dmab->area, size, true);
+                return NULL;
+        }
+        return p;
+}
+
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+        dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
+                         DMA_BIDIRECTIONAL);
+        do_free_pages(dmab->area, dmab->bytes, true);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+                           struct vm_area_struct *area)
+{
+        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+        return dma_mmap_coherent(dmab->dev.dev, area,
+                                 dmab->area, dmab->addr, dmab->bytes);
+}
+#else
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
         return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
 }
 
 static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
 {
-        if (x86_fallback(dmab)) {
-                do_free_pages(dmab->area, dmab->bytes, true);
-                return;
-        }
         dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
 static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                            struct vm_area_struct *area)
 {
-#ifdef CONFIG_SND_DMA_SGBUF
-        if (x86_fallback(dmab)) {
-                area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-                return snd_dma_continuous_mmap(dmab, area);
-        }
-#endif
         return dma_mmap_wc(dmab->dev.dev, area,
                            dmab->area, dmab->addr, dmab->bytes);
 }
+#endif
 
 static const struct snd_malloc_ops snd_dma_wc_ops = {
         .alloc = snd_dma_wc_alloc,
@@ -804,7 +817,7 @@ static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
         dmab->dev.type = type; /* restore the type */
         /* if IOMMU is present but failed, give up */
-        if (!x86_fallback(dmab))
+        if (get_dma_ops(dmab->dev.dev))
                 return NULL;
         /* try fallback */
         return snd_dma_sg_fallback_alloc(dmab, size);
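The second hunk follows from the removal of the x86_fallback() macro: snd_dma_sg_alloc() now queries get_dma_ops() directly. A non-NULL result means the device has custom DMA ops installed (typically an IOMMU), in which case the direct-page fallback cannot produce valid device addresses. A minimal sketch of that decision (the helper name is chosen here for illustration):

    #include <linux/dma-map-ops.h>

    /* Illustrative only: the direct-page fallback is valid only when the
     * device uses dma-direct, i.e. no custom DMA ops are installed.
     */
    static bool can_use_direct_page_fallback(struct device *dev)
    {
            return !get_dma_ops(dev);
    }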