Commit ea8c2e11 authored by Catalin Marinas

arm64: Extend the idmap to the whole kernel image

This patch changes the idmap page table creation during boot to cover
the whole kernel image, allowing functions like cpu_reset() to be safely
called with the physical address.
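
For illustration, a minimal C sketch (not kernel code) of the invariant the
extended idmap provides: every physical address inside the kernel image is
mapped with virt == phys, so a branch to a physical address such as
__pa(cpu_reset) remains valid while the MMU is being toggled. The bounds and
the cpu_reset address below are made-up stand-ins for __pa(KERNEL_START),
__pa(KERNEL_END) and __pa(cpu_reset).

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up stand-ins for __pa(KERNEL_START) and __pa(KERNEL_END). */
    #define KIMAGE_PA_START 0x80080000ULL
    #define KIMAGE_PA_END   0x80a00000ULL

    /*
     * With the extended idmap, any PA inside the image is mapped
     * virt == phys, not just the page containing __turn_mmu_on as
     * before this patch.
     */
    static bool pa_is_idmapped(uint64_t pa)
    {
            return pa >= KIMAGE_PA_START && pa < KIMAGE_PA_END;
    }

    int main(void)
    {
            uint64_t pa_cpu_reset = 0x80123400ULL; /* hypothetical __pa(cpu_reset) */

            printf("cpu_reset idmapped: %d\n", pa_is_idmapped(pa_cpu_reset));
            return 0;
    }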

This patch also simplifies the create_block_map asm macro to no longer
take an idmap argument and always use the phys/virt/end parameters. For
the idmap case, phys == virt.
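
As a reading aid, here is a C model of what the simplified macro computes.
This is an illustrative sketch only, assuming 4KB pages with 2MB blocks
(BLOCK_SHIFT = 21, PTRS_PER_PTE = 512) and a made-up flags value; it is not
the kernel's implementation. The identity mapping becomes just the
phys == virt case of the same loop.

    #include <stdint.h>

    #define BLOCK_SHIFT  21                     /* assumed: 4KB pages, 2MB blocks */
    #define BLOCK_SIZE   (1ULL << BLOCK_SHIFT)
    #define PTRS_PER_PTE 512

    /* C model of the simplified create_block_map asm macro. */
    static void create_block_map(uint64_t *tbl, uint64_t flags,
                                 uint64_t phys, uint64_t virt, uint64_t end)
    {
            /* lsr/and: table index of the first and last block to map */
            uint64_t idx = (virt >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
            uint64_t end_idx = (end >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
            /* orr: block-aligned output address plus attribute flags */
            uint64_t entry = flags | ((phys >> BLOCK_SHIFT) << BLOCK_SHIFT);

            do {                                /* str/add/add/cmp/b.ls loop */
                    tbl[idx] = entry;
                    entry += BLOCK_SIZE;        /* next block */
            } while (++idx <= end_idx);         /* inclusive, like b.ls */
    }

    int main(void)
    {
            static uint64_t tbl[PTRS_PER_PTE];
            uint64_t pa_start = 0x80080000ULL, pa_end = 0x80a00000ULL;

            /* The idmap case now simply passes phys for virt as well. */
            create_block_map(tbl, 0x711ULL /* made-up flags */, pa_start,
                             pa_start, pa_end);
            return 0;
    }
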
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 020c1427
@@ -384,26 +384,18 @@ ENDPROC(__calc_phys_offset)
  * Preserves:	tbl, flags
  * Corrupts:	phys, start, end, pstate
  */
-	.macro	create_block_map, tbl, flags, phys, start, end, idmap=0
+	.macro	create_block_map, tbl, flags, phys, start, end
 	lsr	\phys, \phys, #BLOCK_SHIFT
-	.if	\idmap
-	and	\start, \phys, #PTRS_PER_PTE - 1	// table index
-	.else
 	lsr	\start, \start, #BLOCK_SHIFT
 	and	\start, \start, #PTRS_PER_PTE - 1	// table index
-	.endif
 	orr	\phys, \flags, \phys, lsl #BLOCK_SHIFT	// table entry
-	.ifnc	\start,\end
 	lsr	\end, \end, #BLOCK_SHIFT
 	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
-	.endif
 9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
-	.ifnc	\start,\end
 	add	\start, \start, #1			// next entry
 	add	\phys, \phys, #BLOCK_SIZE		// next block
 	cmp	\start, \end
 	b.ls	9999b
-	.endif
 	.endm
 
 /*
@@ -435,9 +427,13 @@ __create_page_tables:
 	 * Create the identity mapping.
 	 */
 	add	x0, x25, #PAGE_SIZE		// section table address
-	adr	x3, __turn_mmu_on		// virtual/physical address
+	ldr	x3, =KERNEL_START
+	add	x3, x3, x28			// __pa(KERNEL_START)
 	create_pgd_entry x25, x0, x3, x5, x6
-	create_block_map x0, x7, x3, x5, x5, idmap=1
+	ldr	x6, =KERNEL_END
+	mov	x5, x3				// __pa(KERNEL_START)
+	add	x6, x6, x28			// __pa(KERNEL_END)
+	create_block_map x0, x7, x3, x5, x6
 
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
@@ -445,7 +441,7 @@ __create_page_tables:
 	add	x0, x26, #PAGE_SIZE		// section table address
 	mov	x5, #PAGE_OFFSET
 	create_pgd_entry x26, x0, x5, x3, x6
-	ldr	x6, =KERNEL_END - 1
+	ldr	x6, =KERNEL_END
 	mov	x3, x24				// phys offset
 	create_block_map x0, x7, x3, x5, x6
 