Commit ad67f5a6 authored by Christoph Hellwig

arm64: replace ZONE_DMA with ZONE_DMA32

arm64 uses ZONE_DMA for allocations below 32 bits.  These days we
name the zone for that ZONE_DMA32, which will allow us to use the
dma-direct and generic swiotlb code as-is, so rename it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent ac2e8860
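For context, a minimal sketch of what the rename means for allocation-flag selection (a hypothetical helper written purely for illustration, not code from this patch): a device whose coherent DMA mask is 32 bits or narrower gets GFP_DMA32, so its pages come from ZONE_DMA32 (below 4 GiB), which is the behaviour the generic dma-direct and swiotlb code expect.

static gfp_t example_coherent_gfp(struct device *dev, gfp_t flags)
{
	/* 32-bit-only device: allocate from ZONE_DMA32, i.e. below 4 GiB */
	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA32;
	return flags;
}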
@@ -227,7 +227,7 @@ config GENERIC_CSUM
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
-config ZONE_DMA
+config ZONE_DMA32
 	def_bool y
 
 config HAVE_GENERIC_GUP
...
@@ -95,9 +95,9 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		flags |= GFP_DMA;
+		flags |= GFP_DMA32;
 	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
 		struct page *page;
 		void *addr;
@@ -397,7 +397,7 @@ static int __init atomic_pool_init(void)
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
 						 pool_size_order, GFP_KERNEL);
 	else
-		page = alloc_pages(GFP_DMA, pool_size_order);
+		page = alloc_pages(GFP_DMA32, pool_size_order);
 
 	if (page) {
 		int ret;
...
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 /*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
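The comment above documents the limit returned by max_zone_dma_phys(), which the rest of this diff uses to size ZONE_DMA32. A rough sketch of how such a limit can be derived under the stated assumption (memory starting above 4G, 32-bit devices using a DMA offset); this is an illustrative approximation, not necessarily the kernel's exact implementation:

static phys_addr_t example_zone_dma32_limit(phys_addr_t dram_start,
					     phys_addr_t dram_end)
{
	/* assumed DMA offset: the part of the DRAM base above 32 bits */
	phys_addr_t offset = dram_start & GENMASK_ULL(63, 32);

	/* the zone ends 4 GiB above that offset, or at the end of DRAM */
	return min(offset + (1ULL << 32), dram_end);
}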
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA] = max_dma - min;
+	zone_size[ZONE_DMA32] = max_dma - min;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		if (start >= max)
 			continue;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma) {
 			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA] -= dma_end - start;
+			zhole_size[ZONE_DMA32] -= dma_end - start;
 		}
 #endif
 		if (end > max_dma) {
@@ -467,7 +467,7 @@ void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
...