Commit f5ff79fd authored by Christoph Hellwig

dma-mapping: remove CONFIG_DMA_REMAP

CONFIG_DMA_REMAP is used to build a few helpers around the core
vmalloc code, to use them when dma-direct has to deal with a highmem
page, and to let dma coherent allocations in the dma-iommu layer be
backed by non-contiguous page allocations.

Right now it needs to be explicitly selected by architectures, and
only those that require remapping to deal with devices that are not
DMA coherent do so.  Make it unconditional for builds with CONFIG_MMU:
it adds very little extra code, but makes it much more likely that
large DMA allocations succeed on x86.

This fixes hot-plugging an NVMe Thunderbolt SSD for me, which tries
to allocate a 1MB buffer that is otherwise hard to obtain due to
memory fragmentation on a heavily used laptop.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent fba09099
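For context on the commit message above, a minimal sketch of the kind of request that benefits: a driver asking for a 1 MiB coherent buffer. The function and names below are hypothetical and only illustrate the call; they are not taken from the NVMe driver.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical helper: ask for a 1 MiB coherent DMA buffer. */
static int example_alloc_big_buffer(struct device *dev, void **cpu_addr,
				    dma_addr_t *dma_handle)
{
	/*
	 * With remapping available on every MMU build, an IOMMU-translated
	 * device (such as a Thunderbolt NVMe disk) can have this satisfied
	 * from scattered pages mapped into vmalloc space instead of needing
	 * 1 MiB of physically contiguous memory.
	 */
	*cpu_addr = dma_alloc_coherent(dev, SZ_1M, dma_handle, GFP_KERNEL);
	if (!*cpu_addr)
		return -ENOMEM;
	return 0;
}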
@@ -47,7 +47,7 @@ config ARM
 	select DMA_DECLARE_COHERENT
 	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
-	select DMA_REMAP if MMU
+	select DMA_NONCOHERENT_MMAP if MMU
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
 	select GENERIC_ALLOCATOR
@@ -17,7 +17,7 @@ config XTENSA
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
-	select DMA_REMAP if MMU
+	select DMA_NONCOHERENT_MMAP if MMU
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
@@ -852,7 +852,6 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	return NULL;
 }
 
-#ifdef CONFIG_DMA_REMAP
 static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 		size_t size, enum dma_data_direction dir, gfp_t gfp,
 		unsigned long attrs)
@@ -882,7 +881,6 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	sg_free_table(&sh->sgt);
 	kfree(sh);
 }
-#endif /* CONFIG_DMA_REMAP */
 
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
@@ -1276,7 +1274,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
 		return;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		/*
 		 * If it the address is remapped, then it's either non-coherent
 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -1318,7 +1316,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+	if (!coherent || PageHighMem(page)) {
 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1350,7 +1348,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	gfp |= __GFP_ZERO;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+	if (gfpflags_allow_blocking(gfp) &&
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
 		return iommu_dma_alloc_remap(dev, size, handle, gfp,
 				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
@@ -1391,7 +1389,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
 		return -ENXIO;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages)
@@ -1413,7 +1411,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	struct page *page;
 	int ret;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages) {
@@ -1445,10 +1443,8 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.free = iommu_dma_free,
 	.alloc_pages = dma_common_alloc_pages,
 	.free_pages = dma_common_free_pages,
-#ifdef CONFIG_DMA_REMAP
 	.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
 	.free_noncontiguous = iommu_dma_free_noncontiguous,
-#endif
 	.mmap = iommu_dma_mmap,
 	.get_sgtable = iommu_dma_get_sgtable,
 	.map_page = iommu_dma_map_page,
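With the #ifdef gone, iommu_dma_ops always exposes .alloc_noncontiguous and .free_noncontiguous, so the matching consumer API is usable on any MMU build with an IOMMU. A hedged sketch of typical usage follows; the wrapper function is hypothetical, only the dma_*_noncontiguous() calls are the documented API.

#include <linux/dma-mapping.h>

/* Illustrative only: build a device buffer from scattered pages. */
static void *example_alloc_noncontig(struct device *dev, size_t size,
				     struct sg_table **out_sgt)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	/* CPU access goes through a vmalloc-space mapping of the pages. */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}

	*out_sgt = sgt;
	return vaddr;
}

On teardown the pair is undone with dma_vunmap_noncontiguous() followed by dma_free_noncontiguous().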
@@ -110,15 +110,10 @@ config DMA_GLOBAL_POOL
 	select DMA_DECLARE_COHERENT
 	bool
 
-config DMA_REMAP
-	bool
-	depends on MMU
-	select DMA_NONCOHERENT_MMAP
-
 config DMA_DIRECT_REMAP
 	bool
-	select DMA_REMAP
 	select DMA_COHERENT_POOL
+	select DMA_NONCOHERENT_MMAP
 
 config DMA_CMA
 	bool "DMA Contiguous Memory Allocator"
@@ -8,5 +8,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
 obj-$(CONFIG_DMA_API_DEBUG) += debug.o
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
-obj-$(CONFIG_DMA_REMAP) += remap.o
+obj-$(CONFIG_MMU) += remap.o
 obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o
@@ -265,17 +265,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 	if (!page)
 		return NULL;
+
+	/*
+	 * dma_alloc_contiguous can return highmem pages depending on a
+	 * combination the cma= arguments and per-arch setup. These need to be
+	 * remapped to return a kernel virtual address.
+	 */
 	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup,
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here, so
-		 * log an error and fail.
-		 */
-		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
-			dev_info(dev, "Rejecting highmem page from CMA.\n");
-			goto out_free_pages;
-		}
 		remap = true;
 		set_uncached = false;
 	}
@@ -349,7 +345,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		vunmap(cpu_addr);
 	} else {
 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
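Both direct.c hunks rely on the same pairing: a highmem page is remapped into vmalloc space when it is allocated, and the free path recognizes that with is_vmalloc_addr(). Below is a simplified illustration of that pairing under those assumptions; it is not the real dma_direct_alloc()/dma_direct_free(), which also handle atomic pools and uncached mappings.

#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: highmem pages get a vmalloc-space mapping, others use the linear map. */
static void *example_map_page(struct page *page, size_t size, pgprot_t prot)
{
	if (!PageHighMem(page))
		return page_address(page);
	/* No kernel linear mapping exists, so build one in vmalloc space. */
	return dma_common_contiguous_remap(page, size, prot,
					   __builtin_return_address(0));
}

static void example_unmap(void *cpu_addr)
{
	/* A vmalloc address means the buffer was remapped above. */
	if (is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
}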