Commit 3690951f authored by Catalin Marinas

arm64: Use swiotlb late initialisation

Since arm64 does not support ISA, there is no need for early swiotlb
initialisation. This patch switches the DMA mapping code to
swiotlb_late_init_with_default_size(). A side effect of this is that
GFP_DMA is used for the swiotlb buffer and devices with a 32-bit
coherent mask are correctly supported.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 19e7640d
@@ -93,11 +93,17 @@ static struct dma_map_ops coherent_swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-void __init arm64_swiotlb_init(void)
+extern int swiotlb_late_init_with_default_size(size_t default_size);
+
+static int __init swiotlb_late_init(void)
 {
+	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+
 	dma_ops = &coherent_swiotlb_dma_ops;
-	swiotlb_init(1);
+	return swiotlb_late_init_with_default_size(swiotlb_size);
 }
+subsys_initcall(swiotlb_late_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
@@ -262,8 +262,6 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
-	arm64_swiotlb_init();
-
 	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
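
For illustration only (not part of this commit), the sketch below shows a hypothetical platform driver that benefits from the change described in the commit message: it sets a 32-bit coherent DMA mask and allocates a coherent buffer. The driver and function names are made up; the point is that the allocation can always be satisfied below 4GB, either directly from ZONE_DMA or by falling back to the swiotlb bounce buffer, which after this patch is itself allocated with GFP_DMA.

/*
 * Hypothetical consumer of the late-initialised, GFP_DMA-backed swiotlb
 * buffer. Names are illustrative, not taken from the kernel tree.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_dma32_probe(struct platform_device *pdev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;
	int ret;

	/* The device can only reach the low 4GB for coherent DMA. */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * With the swiotlb buffer allocated via GFP_DMA by the late
	 * initcall, the returned dma_handle fits the 32-bit mask.
	 */
	cpu_addr = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma_handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	dev_info(&pdev->dev, "coherent buffer at bus address %llx\n",
		 (unsigned long long)dma_handle);

	dma_free_coherent(&pdev->dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}

static struct platform_driver example_dma32_driver = {
	.probe	= example_dma32_probe,
	.driver	= {
		.name = "example-dma32",
	},
};
module_platform_driver(example_dma32_driver);

MODULE_LICENSE("GPL");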