Commit 7614ff32 authored by Balbir Singh, committed by Michael Ellerman

powerpc/mm/radix: Implement STRICT_RWX/mark_rodata_ro() for Radix

The Radix linear mapping code (create_physical_mapping()) tries to use
the largest page size it can at each step. Currently the only reason
it steps down to a smaller page size is if the start addr is
unaligned (never happens in practice), or the end of memory is not
aligned to a huge page boundary.

To support STRICT_RWX we need to break the mapping at __init_begin,
so that the text and rodata prior to that can be marked R_X and the
regular pages after can be marked RW.

Having done that we can now implement mark_rodata_ro() for Radix,
knowing that we won't need to split any mappings.
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
[mpe: Split down to PAGE_SIZE, not 2MB, rewrite change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent cd65d697
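
The standalone sketch below (an illustration only, not part of the commit) mimics the page-size step-down that the patch adds to create_physical_mapping(): a candidate 1G or 2M mapping that would overlap the kernel text/rodata region ending at __init_begin is retried at the next smaller size, down to PAGE_SIZE, so the text/rodata boundary never sits in the middle of a huge mapping. The constants, addresses and helper names (EX_PUD_SIZE, pa_init_begin, pick_mapping_size, ...) are invented for the example.

/*
 * Illustrative sketch only -- not kernel code.  Mimics how the mapping
 * size is stepped down (1G -> 2M -> 4K) whenever a candidate mapping
 * would overlap the kernel text/rodata region ending at __init_begin.
 * All addresses and names below are invented placeholders.
 */
#include <stdio.h>
#include <stdbool.h>

#define EX_PAGE_SIZE (4UL << 10)
#define EX_PMD_SIZE  (2UL << 20)
#define EX_PUD_SIZE  (1UL << 30)

/* Pretend physical addresses of _stext and __init_begin. */
static const unsigned long pa_stext      = 0x00000000UL;
static const unsigned long pa_init_begin = 0x00e00000UL;   /* 14 MB */

/* Same overlap test the patch uses around the text/rodata region. */
static bool overlaps_text(unsigned long addr, unsigned long size)
{
        return addr <= pa_init_begin && addr + size >= pa_stext;
}

/* Pick the largest page size allowed at addr, stepping down near the text. */
static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap)
{
        unsigned long size;

        if (addr % EX_PUD_SIZE == 0 && gap >= EX_PUD_SIZE)
                size = EX_PUD_SIZE;
        else if (addr % EX_PMD_SIZE == 0 && gap >= EX_PMD_SIZE)
                size = EX_PMD_SIZE;
        else
                size = EX_PAGE_SIZE;

        if (size == EX_PUD_SIZE && overlaps_text(addr, size))
                size = EX_PMD_SIZE;          /* retry at the 2M level */
        if (size == EX_PMD_SIZE && overlaps_text(addr, size))
                size = EX_PAGE_SIZE;         /* and finally at 4K */

        return size;
}

int main(void)
{
        unsigned long end = 2UL << 30;       /* pretend 2 GB of RAM */
        unsigned long addr = 0, run_start = 0, prev = 0;

        while (addr < end) {
                unsigned long size = pick_mapping_size(addr, end - addr);

                /* Print one line per contiguous run, like print_mapping(). */
                if (size != prev) {
                        if (prev)
                                printf("0x%010lx..0x%010lx with 0x%lx pages\n",
                                       run_start, addr, prev);
                        run_start = addr;
                        prev = size;
                }
                addr += size;
        }
        printf("0x%010lx..0x%010lx with 0x%lx pages\n", run_start, end, prev);
        return 0;
}

With these placeholder addresses the region below __init_begin ends up mapped with 4K pages, and memory above it falls back to 2M and then 1G mappings, which is the behaviour the diff below implements.
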
@@ -11,6 +11,7 @@
 #include <linux/sched/mm.h>
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
+#include <linux/mm.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -110,6 +111,49 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
         return 0;
 }

+#ifdef CONFIG_STRICT_KERNEL_RWX
+void radix__mark_rodata_ro(void)
+{
+        unsigned long start = (unsigned long)_stext;
+        unsigned long end = (unsigned long)__init_begin;
+        unsigned long idx;
+        pgd_t *pgdp;
+        pud_t *pudp;
+        pmd_t *pmdp;
+        pte_t *ptep;
+
+        start = ALIGN_DOWN(start, PAGE_SIZE);
+        end = PAGE_ALIGN(end); // aligns up
+
+        pr_devel("marking ro start %lx, end %lx\n", start, end);
+
+        for (idx = start; idx < end; idx += PAGE_SIZE) {
+                pgdp = pgd_offset_k(idx);
+                pudp = pud_alloc(&init_mm, pgdp, idx);
+                if (!pudp)
+                        continue;
+                if (pud_huge(*pudp)) {
+                        ptep = (pte_t *)pudp;
+                        goto update_the_pte;
+                }
+                pmdp = pmd_alloc(&init_mm, pudp, idx);
+                if (!pmdp)
+                        continue;
+                if (pmd_huge(*pmdp)) {
+                        ptep = pmdp_ptep(pmdp);
+                        goto update_the_pte;
+                }
+                ptep = pte_alloc_kernel(pmdp, idx);
+                if (!ptep)
+                        continue;
+update_the_pte:
+                radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+        }
+
+        radix__flush_tlb_kernel_range(start, end);
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 static inline void __meminit print_mapping(unsigned long start,
                                            unsigned long end,
                                            unsigned long size)
@@ -125,6 +169,12 @@ static int __meminit create_physical_mapping(unsigned long start,
 {
         unsigned long vaddr, addr, mapping_size = 0;
         pgprot_t prot;
+        unsigned long max_mapping_size;
+#ifdef CONFIG_STRICT_KERNEL_RWX
+        int split_text_mapping = 1;
+#else
+        int split_text_mapping = 0;
+#endif

         start = _ALIGN_UP(start, PAGE_SIZE);
         for (addr = start; addr < end; addr += mapping_size) {
@@ -133,9 +183,12 @@ static int __meminit create_physical_mapping(unsigned long start,
                 gap = end - addr;
                 previous_size = mapping_size;
+                max_mapping_size = PUD_SIZE;

+retry:
                 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
-                    mmu_psize_defs[MMU_PAGE_1G].shift)
+                    mmu_psize_defs[MMU_PAGE_1G].shift &&
+                    PUD_SIZE <= max_mapping_size)
                         mapping_size = PUD_SIZE;
                 else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
                          mmu_psize_defs[MMU_PAGE_2M].shift)
@@ -143,6 +196,18 @@ static int __meminit create_physical_mapping(unsigned long start,
                 else
                         mapping_size = PAGE_SIZE;

+                if (split_text_mapping && (mapping_size == PUD_SIZE) &&
+                        (addr <= __pa_symbol(__init_begin)) &&
+                        (addr + mapping_size) >= __pa_symbol(_stext)) {
+                        max_mapping_size = PMD_SIZE;
+                        goto retry;
+                }
+
+                if (split_text_mapping && (mapping_size == PMD_SIZE) &&
+                        (addr <= __pa_symbol(__init_begin)) &&
+                        (addr + mapping_size) >= __pa_symbol(_stext))
+                        mapping_size = PAGE_SIZE;
+
                 if (mapping_size != previous_size) {
                         print_mapping(start, addr, previous_size);
                         start = addr;
@@ -500,7 +500,9 @@ void mark_rodata_ro(void)
                 return;
         }

-        if (!radix_enabled())
+        if (radix_enabled())
+                radix__mark_rodata_ro();
+        else
                 hash__mark_rodata_ro();
 }
 #endif