Commit 44176bb3 authored by Geert Uytterhoeven, committed by Catalin Marinas

arm64: Add support for DMA_ATTR_FORCE_CONTIGUOUS to IOMMU

Add support for allocating physically contiguous DMA buffers on arm64
systems with an IOMMU.  This can be useful when two or more devices
with different memory requirements are involved in buffer sharing.

Note that as this uses the CMA allocator, setting the
DMA_ATTR_FORCE_CONTIGUOUS attribute has a runtime dependency on
CONFIG_DMA_CMA, just like on arm32.

For arm64 systems using swiotlb, no changes are needed to support the
allocation of physically contiguous DMA buffers:
  - swiotlb always uses physically contiguous buffers (up to
    IO_TLB_SEGSIZE = 128 pages),
  - arm64's __dma_alloc_coherent() already calls
    dma_alloc_from_contiguous() when CMA is available.
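
As a minimal, hypothetical driver-side sketch (not part of this patch), a consumer would request such a buffer through the generic DMA API by passing DMA_ATTR_FORCE_CONTIGUOUS to dma_alloc_attrs(); the helper names below are illustrative only:

#include <linux/dma-mapping.h>

/* Illustrative helpers; "iova" is the device-visible (IOMMU-mapped) address. */
static void *alloc_contig_dma_buf(struct device *dev, size_t size,
				  dma_addr_t *iova)
{
	/* Expected to fail (return NULL) without CONFIG_DMA_CMA, per the note above. */
	return dma_alloc_attrs(dev, size, iova, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void free_contig_dma_buf(struct device *dev, size_t size,
				void *cpu_addr, dma_addr_t iova)
{
	/* Pass the same attrs so the contiguous release path is taken. */
	dma_free_attrs(dev, size, cpu_addr, iova,
		       DMA_ATTR_FORCE_CONTIGUOUS);
}

Note that the same attrs value has to be passed back to dma_free_attrs(), since __iommu_free_attrs() below selects the contiguous release path based on the attribute rather than on the address.
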
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent f13d52cb
@@ -584,20 +584,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 	 */
 	gfp |= __GFP_ZERO;
 
-	if (gfpflags_allow_blocking(gfp)) {
-		struct page **pages;
-		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-
-		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-					handle, flush_page);
-		if (!pages)
-			return NULL;
-
-		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-					      __builtin_return_address(0));
-		if (!addr)
-			iommu_dma_free(dev, pages, iosize, handle);
-	} else {
+	if (!gfpflags_allow_blocking(gfp)) {
 		struct page *page;
 		/*
 		 * In atomic context we can't remap anything, so we'll only
@@ -621,6 +608,45 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 			__free_from_pool(addr, size);
 			addr = NULL;
 		}
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size), gfp);
+		if (!page)
+			return NULL;
+
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		if (iommu_dma_mapping_error(dev, *handle)) {
+			dma_release_from_contiguous(dev, page,
+						    size >> PAGE_SHIFT);
+			return NULL;
+		}
+		if (!coherent)
+			__dma_flush_area(page_to_virt(page), iosize);
+
+		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+						   prot,
+						   __builtin_return_address(0));
+		if (!addr) {
+			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+			dma_release_from_contiguous(dev, page,
+						    size >> PAGE_SHIFT);
+		}
+	} else {
+		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+		struct page **pages;
+
+		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+					handle, flush_page);
+		if (!pages)
+			return NULL;
+
+		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+					      __builtin_return_address(0));
+		if (!addr)
+			iommu_dma_free(dev, pages, iosize, handle);
 	}
 
 	return addr;
 }
@@ -632,7 +658,8 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	size = PAGE_ALIGN(size);
 	/*
-	 * @cpu_addr will be one of 3 things depending on how it was allocated:
+	 * @cpu_addr will be one of 4 things depending on how it was allocated:
+	 * - A remapped array of pages for contiguous allocations.
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
 	 *   non-atomic allocations.
 	 * - A non-cacheable alias from the atomic pool, for atomic
@@ -644,6 +671,12 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	if (__in_atomic_pool(cpu_addr, size)) {
 		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
 		__free_from_pool(cpu_addr, size);
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		struct page *page = vmalloc_to_page(cpu_addr);
+
+		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);