Commit ec97d022 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/kasan: Declare kasan_init_region() weak

In order to allow sub-arches to allocate KASAN regions using optimised
methods (Huge pages on 8xx, BATs on BOOK3S, ...), declare
kasan_init_region() weak.

Also make kasan_init_shadow_page_tables() accessible from outside,
so that it can be called from the specific kasan_init_region()
functions if needed.

And populate the remaining KASAN address space only once the region
mapping has been performed, to allow 8xx to allocate hugepd instead of
standard page tables for mapping via 8M hugepages.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/3c1ce419fa1b5a4171b92d7fb16455ca17e1b96d.1589866984.git.christophe.leroy@csgroup.eu
parent 7dec42ab
...@@ -34,5 +34,8 @@ static inline void kasan_init(void) { } ...@@ -34,5 +34,8 @@ static inline void kasan_init(void) { }
static inline void kasan_late_init(void) { } static inline void kasan_late_init(void) { }
#endif #endif
int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end);
int kasan_init_region(void *start, size_t size);
#endif /* __ASSEMBLY */ #endif /* __ASSEMBLY */
#endif #endif
...@@ -28,7 +28,7 @@ static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot) ...@@ -28,7 +28,7 @@ static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0); __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
} }
static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{ {
pmd_t *pmd; pmd_t *pmd;
unsigned long k_cur, k_next; unsigned long k_cur, k_next;
...@@ -52,7 +52,7 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned ...@@ -52,7 +52,7 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
return 0; return 0;
} }
static int __init kasan_init_region(void *start, size_t size) int __init __weak kasan_init_region(void *start, size_t size)
{ {
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
...@@ -122,14 +122,6 @@ static void __init kasan_mmu_init(void) ...@@ -122,14 +122,6 @@ static void __init kasan_mmu_init(void)
int ret; int ret;
struct memblock_region *reg; struct memblock_region *reg;
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
if (ret)
panic("kasan: kasan_init_shadow_page_tables() failed");
}
for_each_memblock(memory, reg) { for_each_memblock(memory, reg) {
phys_addr_t base = reg->base; phys_addr_t base = reg->base;
phys_addr_t top = min(base + reg->size, total_lowmem); phys_addr_t top = min(base + reg->size, total_lowmem);
...@@ -141,6 +133,15 @@ static void __init kasan_mmu_init(void) ...@@ -141,6 +133,15 @@ static void __init kasan_mmu_init(void)
if (ret) if (ret)
panic("kasan: kasan_init_region() failed"); panic("kasan: kasan_init_region() failed");
} }
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
if (ret)
panic("kasan: kasan_init_shadow_page_tables() failed");
}
} }
void __init kasan_init(void) void __init kasan_init(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment