Commit 5fbc49ce authored by Ard Biesheuvel, committed by Will Deacon

arm64: mm: Reserve enough pages for the initial ID map

The logic that conditionally allocates one additional page at each
swapper page table level if KASLR is enabled is also applied to the
initial ID map, now that we have started using the same set of macros
to allocate the space for it.

However, the placement of the kernel in physical memory might result in
additional pages being needed at any level, even if KASLR is disabled in
the build. So account for this in the computation.
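For illustration, a minimal host-side sketch of the EARLY_ENTRIES() arithmetic (the 21-bit PMD shift and the addresses below are made-up example values, not taken from the kernel sources). It shows how a range whose placement straddles a table boundary needs one more entry at that level than an aligned placement of the same size, regardless of KASLR:

/*
 * Standalone sketch of the entry-count calculation used by EARLY_ENTRIES().
 * Builds with any C compiler; the shift and addresses are illustrative only.
 */
#include <stdio.h>

/* Entries needed at a level with the given shift to cover [start, end). */
static unsigned long entries(unsigned long start, unsigned long end, int shift)
{
	return ((end - 1) >> shift) - (start >> shift) + 1;
}

int main(void)
{
	const int pmd_shift = 21;              /* 2 MiB blocks with 4K pages */
	const unsigned long size = 1UL << 20;  /* a 1 MiB range, for illustration */

	/* Aligned placement: the whole range fits in a single 2 MiB block. */
	printf("aligned:   %lu entries\n",
	       entries(0x40000000UL, 0x40000000UL + size, pmd_shift));

	/* Placement near a block boundary: the same range now spans two
	 * blocks, so one extra next-level page table is needed. */
	printf("straddles: %lu entries\n",
	       entries(0x401f8000UL, 0x401f8000UL + size, pmd_shift));

	return 0;
}

This prints 1 entry for the aligned case and 2 for the straddling case, which is why the ID map passes a fixed extra count of 1 instead of relying on EARLY_KASLR.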

Fixes: c3cee924 ("arm64: head: cover entire kernel image in initial ID map")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220826164800.2059148-1-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 6bb0d64c
@@ -64,28 +64,28 @@
 #define EARLY_KASLR	(0)
 #endif
 
-#define EARLY_ENTRIES(vstart, vend, shift) \
-	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift, add) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
 
-#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
 
 #if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
+#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
 #else
-#define EARLY_PUDS(vstart, vend) (0)
+#define EARLY_PUDS(vstart, vend, add) (0)
 #endif
 
 #if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
+#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
 #else
-#define EARLY_PMDS(vstart, vend) (0)
+#define EARLY_PMDS(vstart, vend, add) (0)
 #endif
 
-#define EARLY_PAGES(vstart, vend) ( 1			/* PGDIR page */				\
-			+ EARLY_PGDS((vstart), (vend)) 	/* each PGDIR needs a next level page table */	\
-			+ EARLY_PUDS((vstart), (vend))	/* each PUD needs a next level page table */	\
-			+ EARLY_PMDS((vstart), (vend)))	/* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
+#define EARLY_PAGES(vstart, vend, add) ( 1		/* PGDIR page */				\
+			+ EARLY_PGDS((vstart), (vend), add) 	/* each PGDIR needs a next level page table */	\
+			+ EARLY_PUDS((vstart), (vend), add)	/* each PUD needs a next level page table */	\
+			+ EARLY_PMDS((vstart), (vend), add))	/* each PMD needs a next level page table */
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
 
 /* the initial ID map may need two extra pages if it needs to be extended */
 #if VA_BITS < 48
@@ -93,7 +93,7 @@
 #else
 #define INIT_IDMAP_DIR_SIZE	(INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
 #endif
-#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
+#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 
 /* Initial memory map size */
 #if ARM64_KERNEL_USES_PMD_MAPS