Commit c2f2124e authored by Christoph Hellwig

dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code

Only call into arch_dma_alloc if we require an uncached mapping,
and remove the parisc code that manually performed normal cached
DMA_ATTR_NON_CONSISTENT allocations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Helge Deller <deller@gmx.de> # parisc
parent 4b85faed
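
For context, the dma_alloc_need_uncached() helper used in the kernel/dma/direct.c hunks below comes from the parent commit 4b85faed. A minimal sketch of its logic, as implied by how this commit uses it (the actual body lives in the parent commit, not in this diff):

/*
 * Sketch: an allocation only needs an uncached mapping when the device
 * is not cache-coherent AND the caller did not opt out of coherence
 * with DMA_ATTR_NON_CONSISTENT (in which case plain cached pages plus
 * explicit cache syncs are acceptable).
 */
static inline bool dma_alloc_need_uncached(struct device *dev,
		unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return false;
	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return false;
	return true;
}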
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -394,17 +394,20 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void *pcxl_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned long vaddr;
 	unsigned long paddr;
 	int order;
 
+	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
+		return NULL;
+
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag | __GFP_ZERO, order);
+	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
@@ -421,44 +424,19 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
 	return (void *)vaddr;
 }
 
-static void *pcx_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
-	void *addr;
-
-	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		return NULL;
-
-	addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
-	if (addr)
-		*dma_handle = (dma_addr_t)virt_to_phys(addr);
-
-	return addr;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
-		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
-	else
-		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
-}
-
 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);
 
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
-		size = 1 << (order + PAGE_SHIFT);
-		unmap_uncached_pages((unsigned long)vaddr, size);
-		pcxl_free_range((unsigned long)vaddr, size);
-
-		vaddr = __va(dma_handle);
-	}
-	free_pages((unsigned long)vaddr, get_order(size));
+	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
+		     boot_cpu_data.cpu_type != pcxl);
+
+	size = 1 << (order + PAGE_SHIFT);
+	unmap_uncached_pages((unsigned long)vaddr, size);
+	pcxl_free_range((unsigned long)vaddr, size);
+
+	free_pages((unsigned long)__va(dma_handle), order);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -191,7 +191,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    !dev_is_dma_coherent(dev))
+	    dma_alloc_need_uncached(dev, attrs))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 }
@@ -200,7 +200,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    !dev_is_dma_coherent(dev))
+	    dma_alloc_need_uncached(dev, attrs))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 	else
 		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
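
With this change, a DMA_ATTR_NON_CONSISTENT request on a non-coherent machine no longer reaches arch_dma_alloc(): dma_alloc_need_uncached() returns false, so the allocation falls through to the common dma_direct_alloc_pages() path and the caller receives ordinary cached pages. A hypothetical driver-side sketch of such a request (the example_* name is illustrative, not from this commit):

/*
 * The driver opts out of coherence, so the common path hands back
 * cached memory; the driver must then bracket device accesses with
 * dma_sync_single_for_device() / dma_sync_single_for_cpu().
 */
static void *example_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_NON_CONSISTENT);
}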