Commit 8b5369ea authored by Nicolas Saenz Julienne, committed by Catalin Marinas

dma/direct: turn ARCH_ZONE_DMA_BITS into a variable

Some architectures, notably ARM, are interested in tweaking this
depending on their runtime DMA addressing limitations.
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 4686da51
...@@ -38,6 +38,4 @@ extern int pfn_valid(unsigned long); ...@@ -38,6 +38,4 @@ extern int pfn_valid(unsigned long);
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#define ARCH_ZONE_DMA_BITS 30
#endif #endif
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/sort.h> #include <linux/sort.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_fdt.h> #include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h> #include <linux/dma-contiguous.h>
#include <linux/efi.h> #include <linux/efi.h>
...@@ -41,6 +42,8 @@ ...@@ -41,6 +42,8 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#define ARM64_ZONE_DMA_BITS 30
/* /*
* We need to be able to catch inadvertent references to memstart_addr * We need to be able to catch inadvertent references to memstart_addr
* that occur (potentially in generic code) before arm64_memblock_init() * that occur (potentially in generic code) before arm64_memblock_init()
...@@ -440,8 +443,10 @@ void __init arm64_memblock_init(void) ...@@ -440,8 +443,10 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem(); early_init_fdt_scan_reserved_mem();
if (IS_ENABLED(CONFIG_ZONE_DMA)) if (IS_ENABLED(CONFIG_ZONE_DMA)) {
arm64_dma_phys_limit = max_zone_phys(ARCH_ZONE_DMA_BITS); zone_dma_bits = ARM64_ZONE_DMA_BITS;
arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
}
if (IS_ENABLED(CONFIG_ZONE_DMA32)) if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma32_phys_limit = max_zone_phys(32); arm64_dma32_phys_limit = max_zone_phys(32);
......
...@@ -329,13 +329,4 @@ struct vm_area_struct; ...@@ -329,13 +329,4 @@ struct vm_area_struct;
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#include <asm/slice.h> #include <asm/slice.h>
/*
* Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
*/
#ifdef CONFIG_PPC32
#define ARCH_ZONE_DMA_BITS 30
#else
#define ARCH_ZONE_DMA_BITS 31
#endif
#endif /* _ASM_POWERPC_PAGE_H */ #endif /* _ASM_POWERPC_PAGE_H */
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/memremap.h> #include <linux/memremap.h>
#include <linux/dma-direct.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/prom.h> #include <asm/prom.h>
...@@ -201,10 +202,10 @@ static int __init mark_nonram_nosave(void) ...@@ -201,10 +202,10 @@ static int __init mark_nonram_nosave(void)
* everything else. GFP_DMA32 page allocations automatically fall back to * everything else. GFP_DMA32 page allocations automatically fall back to
* ZONE_DMA. * ZONE_DMA.
* *
* By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
* inform the generic DMA mapping code. 32-bit only devices (if not handled * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
* by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
* otherwise served by ZONE_DMA. * ZONE_DMA.
*/ */
static unsigned long max_zone_pfns[MAX_NR_ZONES]; static unsigned long max_zone_pfns[MAX_NR_ZONES];
...@@ -237,9 +238,18 @@ void __init paging_init(void) ...@@ -237,9 +238,18 @@ void __init paging_init(void)
printk(KERN_DEBUG "Memory hole size: %ldMB\n", printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20)); (long int)((top_of_ram - total_ram) >> 20));
/*
* Allow 30-bit DMA for very limited Broadcom wifi chips on many
* powerbooks.
*/
if (IS_ENABLED(CONFIG_PPC32))
zone_dma_bits = 30;
else
zone_dma_bits = 31;
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = min(max_low_pfn, max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT)); 1UL << (zone_dma_bits - PAGE_SHIFT));
#endif #endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn; max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
......
...@@ -177,8 +177,6 @@ static inline int devmem_is_allowed(unsigned long pfn) ...@@ -177,8 +177,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define ARCH_ZONE_DMA_BITS 31
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
......
...@@ -118,6 +118,7 @@ void __init paging_init(void) ...@@ -118,6 +118,7 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
zone_dma_bits = 31;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn; max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h> #include <linux/mem_encrypt.h>
extern unsigned int zone_dma_bits;
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h> #include <asm/dma-direct.h>
#else #else
......
...@@ -16,12 +16,11 @@ ...@@ -16,12 +16,11 @@
#include <linux/swiotlb.h> #include <linux/swiotlb.h>
/* /*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * some use it for entirely different regions: * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
*/ */
#ifndef ARCH_ZONE_DMA_BITS unsigned int zone_dma_bits __ro_after_init = 24;
#define ARCH_ZONE_DMA_BITS 24
#endif
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size) static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{ {
...@@ -69,7 +68,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, ...@@ -69,7 +68,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
* Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
* zones. * zones.
*/ */
if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) if (*phys_mask <= DMA_BIT_MASK(zone_dma_bits))
return GFP_DMA; return GFP_DMA;
if (*phys_mask <= DMA_BIT_MASK(32)) if (*phys_mask <= DMA_BIT_MASK(32))
return GFP_DMA32; return GFP_DMA32;
...@@ -395,7 +394,7 @@ int dma_direct_supported(struct device *dev, u64 mask) ...@@ -395,7 +394,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
u64 min_mask; u64 min_mask;
if (IS_ENABLED(CONFIG_ZONE_DMA)) if (IS_ENABLED(CONFIG_ZONE_DMA))
min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS); min_mask = DMA_BIT_MASK(zone_dma_bits);
else else
min_mask = DMA_BIT_MASK(32); min_mask = DMA_BIT_MASK(32);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment