Commit b433dce0 authored by Suzuki K. Poulose, committed by Catalin Marinas

arm64: Handle section maps for swapper/idmap

We use section maps with 4K page size to create the swapper/idmaps.
So far we have used !64K or 4K checks to handle the case where we
use the section maps.
This patch adds a new symbol, ARM64_SWAPPER_USES_SECTION_MAPS, to
handle cases where we use section maps, instead of using the page size
symbols.

Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 87d1587b
...@@ -19,6 +19,19 @@ ...@@ -19,6 +19,19 @@
#ifndef __ASM_KERNEL_PGTABLE_H #ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H #define __ASM_KERNEL_PGTABLE_H
/*
* The linear mapping and the start of memory are both 2M aligned (per
* the arm64 booting.txt requirements). Hence we can use section mapping
* with 4K (section size = 2M) but not with 16K (section size = 32M) or
* 64K (section size = 512M).
*/
#ifdef CONFIG_ARM64_4K_PAGES
#define ARM64_SWAPPER_USES_SECTION_MAPS 1
#else
#define ARM64_SWAPPER_USES_SECTION_MAPS 0
#endif
/* /*
* The idmap and swapper page tables need some space reserved in the kernel * The idmap and swapper page tables need some space reserved in the kernel
* image. Both require pgd, pud (4 levels only) and pmd tables to (section) * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
...@@ -28,26 +41,28 @@ ...@@ -28,26 +41,28 @@
* could be increased on the fly if system RAM is out of reach for the default * could be increased on the fly if system RAM is out of reach for the default
* VA range, so 3 pages are reserved in all cases. * VA range, so 3 pages are reserved in all cases.
*/ */
#ifdef CONFIG_ARM64_64K_PAGES #if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
#else
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
#else
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
#endif #endif
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
#define IDMAP_DIR_SIZE (3 * PAGE_SIZE) #define IDMAP_DIR_SIZE (3 * PAGE_SIZE)
/* Initial memory map size */ /* Initial memory map size */
#ifdef CONFIG_ARM64_64K_PAGES #if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
#define SWAPPER_BLOCK_SIZE PAGE_SIZE
#define SWAPPER_TABLE_SHIFT PMD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT #define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
#define SWAPPER_BLOCK_SIZE SECTION_SIZE #define SWAPPER_BLOCK_SIZE SECTION_SIZE
#define SWAPPER_TABLE_SHIFT PUD_SHIFT #define SWAPPER_TABLE_SHIFT PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
#define SWAPPER_BLOCK_SIZE PAGE_SIZE
#define SWAPPER_TABLE_SHIFT PMD_SHIFT
#endif #endif
/* The size of the initial kernel direct mapping */
#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
/* /*
* Initial memory map attributes. * Initial memory map attributes.
...@@ -55,10 +70,10 @@ ...@@ -55,10 +70,10 @@
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#ifdef CONFIG_ARM64_64K_PAGES #if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif #endif
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/sizes.h> #include <asm/sizes.h>
...@@ -406,14 +407,11 @@ static void __init map_mem(void) ...@@ -406,14 +407,11 @@ static void __init map_mem(void)
* memory addressable from the initial direct kernel mapping. * memory addressable from the initial direct kernel mapping.
* *
* The initial direct kernel mapping, located at swapper_pg_dir, gives * The initial direct kernel mapping, located at swapper_pg_dir, gives
* us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
* PHYS_OFFSET (which must be aligned to 2MB as per * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
* Documentation/arm64/booting.txt). * per Documentation/arm64/booting.txt).
*/ */
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
limit = PHYS_OFFSET + PMD_SIZE;
else
limit = PHYS_OFFSET + PUD_SIZE;
memblock_set_current_limit(limit); memblock_set_current_limit(limit);
/* map all the memory banks */ /* map all the memory banks */
...@@ -424,21 +422,24 @@ static void __init map_mem(void) ...@@ -424,21 +422,24 @@ static void __init map_mem(void)
if (start >= end) if (start >= end)
break; break;
#ifndef CONFIG_ARM64_64K_PAGES if (ARM64_SWAPPER_USES_SECTION_MAPS) {
/* /*
* For the first memory bank align the start address and * For the first memory bank align the start address and
* current memblock limit to prevent create_mapping() from * current memblock limit to prevent create_mapping() from
* allocating pte page tables from unmapped memory. * allocating pte page tables from unmapped memory. With
* When 64K pages are enabled, the pte page table for the * the section maps, if the first block doesn't end on section
* first PGDIR_SIZE is already present in swapper_pg_dir. * size boundary, create_mapping() will try to allocate a pte
* page, which may be returned from an unmapped area.
* When section maps are not used, the pte page table for the
* current limit is already present in swapper_pg_dir.
*/ */
if (start < limit) if (start < limit)
start = ALIGN(start, PMD_SIZE); start = ALIGN(start, SECTION_SIZE);
if (end < limit) { if (end < limit) {
limit = end & PMD_MASK; limit = end & SECTION_MASK;
memblock_set_current_limit(limit); memblock_set_current_limit(limit);
} }
#endif }
__map_memblock(start, end); __map_memblock(start, end);
} }
...@@ -551,12 +552,12 @@ int kern_addr_valid(unsigned long addr) ...@@ -551,12 +552,12 @@ int kern_addr_valid(unsigned long addr)
return pfn_valid(pte_pfn(*pte)); return pfn_valid(pte_pfn(*pte));
} }
#ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES #if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{ {
return vmemmap_populate_basepages(start, end, node); return vmemmap_populate_basepages(start, end, node);
} }
#else /* !CONFIG_ARM64_64K_PAGES */ #else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{ {
unsigned long addr = start; unsigned long addr = start;
...@@ -691,7 +692,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) ...@@ -691,7 +692,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{ {
const u64 dt_virt_base = __fix_to_virt(FIX_FDT); const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
pgprot_t prot = PAGE_KERNEL | PTE_RDONLY; pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
int granularity, size, offset; int size, offset;
void *dt_virt; void *dt_virt;
/* /*
...@@ -717,24 +718,15 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) ...@@ -717,24 +718,15 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
*/ */
BUILD_BUG_ON(dt_virt_base % SZ_2M); BUILD_BUG_ON(dt_virt_base % SZ_2M);
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) { BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT != __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
granularity = PAGE_SIZE;
} else {
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
__fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
granularity = PMD_SIZE;
}
offset = dt_phys % granularity; offset = dt_phys % SWAPPER_BLOCK_SIZE;
dt_virt = (void *)dt_virt_base + offset; dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */ /* map the first chunk so we can read the size from the header */
create_mapping(round_down(dt_phys, granularity), dt_virt_base, create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
granularity, prot); SWAPPER_BLOCK_SIZE, prot);
if (fdt_check_header(dt_virt) != 0) if (fdt_check_header(dt_virt) != 0)
return NULL; return NULL;
...@@ -743,9 +735,9 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) ...@@ -743,9 +735,9 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
if (size > MAX_FDT_SIZE) if (size > MAX_FDT_SIZE)
return NULL; return NULL;
if (offset + size > granularity) if (offset + size > SWAPPER_BLOCK_SIZE)
create_mapping(round_down(dt_phys, granularity), dt_virt_base, create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
round_up(offset + size, granularity), prot); round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
memblock_reserve(dt_phys, size); memblock_reserve(dt_phys, size);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment