Commit a202bd1a authored by Takashi Iwai

ALSA: core: Move mmap handler into memalloc ops

This patch moves the mmap handling code into the common memalloc
handler.  It allows us to reduce the memory-type-specific code in the
PCM code gracefully.
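
For illustration, here is a minimal sketch (not part of this patch) of
how a memalloc backend hooks into the new op.  The snd_dma_foo_* names
are hypothetical; the .mmap member of struct snd_malloc_ops and
snd_dma_buffer_mmap() are what the patch below introduces:

	/* assume snd_dma_foo_alloc() / snd_dma_foo_free() exist elsewhere */
	static int snd_dma_foo_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
	{
		/* map the coherent buffer directly into user space */
		return dma_mmap_coherent(dmab->dev.dev, area,
					 dmab->area, dmab->addr, dmab->bytes);
	}

	static const struct snd_malloc_ops snd_dma_foo_ops = {
		.alloc = snd_dma_foo_alloc,
		.free  = snd_dma_foo_free,
		.mmap  = snd_dma_foo_mmap,
	};

A caller then no longer needs to switch on the buffer type; it calls
snd_dma_buffer_mmap(), which returns -ENOENT when the type provides no
mmap handler, so the PCM core can fall back to its fault-based mapping.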

Link: https://lore.kernel.org/r/20210609162551.7842-5-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent 37af81c5
@@ -11,6 +11,7 @@
 struct device;
 struct page;
+struct vm_area_struct;
 
 /*
  * buffer device info
  */
@@ -69,6 +70,8 @@ int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
 int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
				  struct snd_dma_buffer *dmab);
 void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+			struct vm_area_struct *area);
 dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
 struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
@@ -127,6 +127,23 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 }
 EXPORT_SYMBOL(snd_dma_free_pages);
 
+/**
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+			struct vm_area_struct *area)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->mmap)
+		return ops->mmap(dmab, area);
+	else
+		return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
+
 /**
  * snd_sgbuf_get_addr - return the physical address at the corresponding offset
  * @dmab: buffer allocation information
@@ -283,9 +300,20 @@ static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
 	gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
 }
 
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+			     struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	return remap_pfn_range(area, area->vm_start,
+			       dmab->addr >> PAGE_SHIFT,
+			       area->vm_end - area->vm_start,
+			       area->vm_page_prot);
+}
+
 static const struct snd_malloc_ops snd_dma_iram_ops = {
 	.alloc = snd_dma_iram_alloc,
 	.free = snd_dma_iram_free,
+	.mmap = snd_dma_iram_mmap,
 };
 #endif /* CONFIG_GENERIC_ALLOCATOR */
@@ -320,9 +348,17 @@ static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+			    struct vm_area_struct *area)
+{
+	return dma_mmap_coherent(dmab->dev.dev, area,
+				 dmab->area, dmab->addr, dmab->bytes);
+}
+
 static const struct snd_malloc_ops snd_dma_dev_ops = {
 	.alloc = snd_dma_dev_alloc,
 	.free = snd_dma_dev_free,
+	.mmap = snd_dma_dev_mmap,
 };
 #endif /* CONFIG_HAS_DMA */
@@ -9,6 +9,7 @@ struct snd_malloc_ops {
 	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
 	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
				       unsigned int ofs, unsigned int size);
+	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
 };
 
 #ifdef CONFIG_SND_DMA_SGBUF
@@ -3700,22 +3700,9 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *area)
 {
 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-#ifdef CONFIG_GENERIC_ALLOCATOR
-	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
-		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-		return remap_pfn_range(area, area->vm_start,
-				       substream->dma_buffer.addr >> PAGE_SHIFT,
-				       area->vm_end - area->vm_start, area->vm_page_prot);
-	}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
-	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
-	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
-		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
-					 area,
-					 substream->runtime->dma_area,
-					 substream->runtime->dma_addr,
-					 substream->runtime->dma_bytes);
+	if (!substream->ops->page &&
+	    !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
+		return 0;
 	/* mmap with fault handler */
 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
 	return 0;