Commit c7909509 authored by Marek Szyprowski

ARM: integrate CMA with DMA-mapping subsystem

This patch adds support for CMA to the DMA-mapping subsystem for the ARM
architecture. By default a global CMA area is used, but specific devices
are allowed to have their own private memory areas if required (these can
be created with the dma_declare_contiguous() function during board
initialisation).
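
For illustration only (this is not code from the patch; the board, device
and size below are made up), a machine could declare such a private area
from its ->reserve() callback, which runs before arm_memblock_init()
reserves the global area:

    /* Hypothetical board code: a minimal sketch of a private CMA area. */
    #include <linux/dma-contiguous.h>
    #include <linux/platform_device.h>

    static struct platform_device example_cam_device;  /* assumed device */

    static void __init example_board_reserve(void)
    {
            /*
             * Reserve a 16 MiB private contiguous area for this device.
             * base = 0 and limit = 0 let the allocator place the area
             * below the current memblock limit, i.e. in low memory.
             */
            dma_declare_contiguous(&example_cam_device.dev,
                                   16 * 1024 * 1024, 0, 0);
    }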

Contiguous memory areas reserved for DMA are remapped with 2-level page
tables on boot. Once a buffer is requested, the low memory kernel mapping
is updated to match the requested memory access type.
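
As an illustrative sketch (driver-side code is not part of this patch and
the function names below are hypothetical), the requested access type is
expressed simply by which standard DMA API helper the driver calls:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_alloc_buffers(struct device *dev)
    {
            dma_addr_t dma1, dma2;
            void *coherent, *wc;

            /* Uncached, coherent kernel view of a CMA-backed buffer. */
            coherent = dma_alloc_coherent(dev, 64 * 1024, &dma1, GFP_KERNEL);

            /* Write-combined view, e.g. for a frame buffer (ARM helper). */
            wc = dma_alloc_writecombine(dev, 64 * 1024, &dma2, GFP_KERNEL);

            if (!coherent || !wc)
                    return -ENOMEM;  /* error unwinding trimmed for brevity */

            /* ... use the buffers, then free them with the matching helpers. */
            dma_free_coherent(dev, 64 * 1024, coherent, dma1);
            dma_free_writecombine(dev, 64 * 1024, wc, dma2);
            return 0;
    }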

GFP_ATOMIC allocations are performed from a special pool which is created
early during boot. This way, remapping page attributes is not needed at
allocation time.
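
For example (again only a sketch with hypothetical names), an allocation
made in atomic context has to pass GFP_ATOMIC and is then satisfied from
that boot-time pool instead of triggering a remap:

    #include <linux/dma-mapping.h>

    /* Called with a spinlock held, so sleeping is not allowed. */
    static void *example_alloc_desc_atomic(struct device *dev, dma_addr_t *dma)
    {
            /*
             * GFP_ATOMIC: the buffer is carved out of the early coherent
             * pool, so no page-attribute remapping happens here.  The pool
             * can be enlarged with e.g. coherent_pool=4M on the kernel
             * command line (see the documentation hunk below).
             */
            return dma_alloc_coherent(dev, 4096, dma, GFP_ATOMIC);
    }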

CMA has been enabled unconditionally for ARMv6+ systems.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
parent 0a2b9a6e
@@ -520,6 +520,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			a hypervisor.
 			Default: yes
 
+	coherent_pool=nn[KMG]	[ARM,KNL]
+			Sets the size of memory pool for coherent, atomic dma
+			allocations if Contiguous Memory Allocator (CMA) is used.
+
 	code_bytes	[X86] How many bytes of object code to print
 			in an oops report.
 			Range: 0 - 8192
...
@@ -4,6 +4,8 @@ config ARM
 	select HAVE_AOUT
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+	select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
...
#ifndef ASMARM_DMA_CONTIGUOUS_H
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
#ifdef CONFIG_CMA
#include <linux/types.h>
#include <asm-generic/dma-contiguous.h>
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#endif
#endif
#endif
@@ -30,6 +30,7 @@ struct map_desc {
 #define MT_MEMORY_DTCM		12
 #define MT_MEMORY_ITCM		13
 #define MT_MEMORY_SO		14
+#define MT_MEMORY_DMA_READY	15
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
...
@@ -81,6 +81,7 @@ __setup("fpe=", fpe_setup);
 extern void paging_init(struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -939,12 +940,8 @@ void __init setup_arch(char **cmdline_p)
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-#ifdef CONFIG_ZONE_DMA
-	if (mdesc->dma_zone_size) {
-		extern unsigned long arm_dma_zone_size;
-		arm_dma_zone_size = mdesc->dma_zone_size;
-	}
-#endif
+	setup_dma_zone(mdesc);
+
 	if (mdesc->restart_mode)
 		reboot_setup(&mdesc->restart_mode);
...
This diff is collapsed and is not shown here.
@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/mach-types.h>
 #include <asm/memblock.h>
@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 }
 #endif
 
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+#endif
+}
+
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	unsigned long max_high)
 {
@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	if (arm_dma_zone_size) {
+	if (arm_dma_zone_size)
 		arm_adjust_dma_zone(zone_size, zhole_size,
 				arm_dma_zone_size >> PAGE_SHIFT);
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
-	} else
-		arm_dma_limit = 0xffffffff;
 #endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 	if (mdesc->reserve)
 		mdesc->reserve();
 
+	/*
+	 * reserve memory for DMA contiguous allocations,
+	 * must come from DMA area inside low memory
+	 */
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
 	arm_memblock_steal_permitted = false;
 	memblock_allow_resize();
 	memblock_dump_all();
...
@@ -67,5 +67,8 @@ extern u32 arm_dma_limit;
 #define arm_dma_limit ((u32)~0)
 #endif
 
+extern phys_addr_t arm_lowmem_limit;
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
 			  PMD_SECT_UNCACHED | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_DMA_READY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
 	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 	}
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 	}
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 	 * up one logical pointer to an L2 table.
 	 */
-	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
 			bank->size = newsize;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-			lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+			arm_lowmem_limit = bank->start + bank->size;
 
 		j++;
 	}
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
 	}
 #endif
 	meminfo.nr_banks = j;
-	high_memory = __va(lowmem_limit - 1) + 1;
-	memblock_set_current_limit(lowmem_limit);
+	high_memory = __va(arm_lowmem_limit - 1) + 1;
+	memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
 	 * Find the end of the first block of lowmem.
 	 */
 	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-	if (end >= lowmem_limit)
-		end = lowmem_limit;
+	if (end >= arm_lowmem_limit)
+		end = arm_lowmem_limit;
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
 		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
-		if (end > lowmem_limit)
-			end = lowmem_limit;
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
 		if (start >= end)
 			break;
 
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(lowmem_limit);
+	memblock_set_current_limit(arm_lowmem_limit);
 
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
+	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
 
...