Commit 3c8016e6 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Refactor __kernel_map_pages()

__kernel_map_pages() is almost identical for PPC32 and RADIX.

Refactor it.

On PPC32 it is not needed for KFENCE, but to keep it simple
just make it similar to PPC64.

Move the prototype of hash__kernel_map_pages() into mmu_decl.h to allow
IS_ENABLED() to work on 32-bit.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/3656d47c53bff577739dac536dbae31fff52f6d8.1708078640.git.christophe.leroy@csgroup.eu
parent f7f18e30
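
For orientation before the per-file hunks: the net effect of the refactor is one common __kernel_map_pages() in arch/powerpc/mm/pageattr.c, replacing the BOOK3S/64 inline dispatcher, radix__kernel_map_pages() and the PPC32 copy. Below is a condensed view of the new helper; the code is taken from the pageattr.c hunk further down, only the explanatory comments are editorial:

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long addr = (unsigned long)page_address(page);

        /* Highmem pages have no permanent kernel mapping to toggle. */
        if (PageHighMem(page))
                return;

        /*
         * The hash MMU on Book3S/64 keeps its own implementation;
         * everything else (radix and PPC32) toggles the present bit
         * through the generic powerpc set_memory helpers.
         */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
                hash__kernel_map_pages(page, numpages, enable);
        else if (enable)
                set_memory_p(addr, numpages);   /* make pages present again */
        else
                set_memory_np(addr, numpages);  /* unmap: mark not-present */
}
#endif
#endif
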
arch/powerpc/include/asm/book3s/64/hash.h
@@ -269,8 +269,6 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
                                  int nid, pgprot_t prot);
 int hash__remove_section_mapping(unsigned long start, unsigned long end);
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable);
-
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1027,16 +1027,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
-{
-        if (radix_enabled())
-                radix__kernel_map_pages(page, numpages, enable);
-        else
-                hash__kernel_map_pages(page, numpages, enable);
-}
-#endif
-
 static inline pte_t pmd_pte(pmd_t pmd)
 {
         return __pte_raw(pmd_raw(pmd));
arch/powerpc/include/asm/book3s/64/radix.h
@@ -362,8 +362,6 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
 int radix__remove_section_mapping(unsigned long start, unsigned long end);
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-void radix__kernel_map_pages(struct page *page, int numpages, int enable);
-
 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 #define vmemmap_can_optimize vmemmap_can_optimize
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1339,20 +1339,6 @@ void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
 #endif
 #endif
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-void radix__kernel_map_pages(struct page *page, int numpages, int enable)
-{
-        unsigned long addr;
-
-        addr = (unsigned long)page_address(page);
-
-        if (enable)
-                set_memory_p(addr, numpages);
-        else
-                set_memory_np(addr, numpages);
-}
-#endif
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
arch/powerpc/mm/mmu_decl.h
@@ -186,3 +186,5 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
 int create_section_mapping(unsigned long start, unsigned long end,
                            int nid, pgprot_t prot);
 #endif
+
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
arch/powerpc/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
+#include <mm/mmu_decl.h>
 
 static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
                                     unsigned long old, unsigned long new)
@@ -101,3 +102,22 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
         return apply_to_existing_page_range(&init_mm, start, size,
                                             change_page_attr, (void *)action);
 }
+
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+        unsigned long addr = (unsigned long)page_address(page);
+
+        if (PageHighMem(page))
+                return;
+
+        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
+                hash__kernel_map_pages(page, numpages, enable);
+        else if (enable)
+                set_memory_p(addr, numpages);
+        else
+                set_memory_np(addr, numpages);
+}
+#endif
+#endif
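
Why the prototype move into mmu_decl.h matters here: IS_ENABLED(CONFIG_PPC_BOOK3S_64) is an ordinary compile-time constant, so on 32-bit the hash__kernel_map_pages() branch is discarded as dead code, but the call must still parse, which requires a declaration visible to code shared by both builds. A minimal userspace sketch of the same pattern; the names are illustrative, not from the kernel, and the one-line IS_ENABLED() stand-in glosses over the real <linux/kconfig.h> macro:

#include <stdio.h>

/* Toy stand-in for the kernel's IS_ENABLED(); the real macro also
 * copes with options that are undefined rather than defined to 0. */
#define CONFIG_FEATURE_X 0
#define IS_ENABLED(option) (option)

/* The declaration must be visible even in builds where the branch is
 * dead, otherwise the file does not compile at all. */
void feature_x_handler(int arg);

static void dispatch(int arg)
{
        if (IS_ENABLED(CONFIG_FEATURE_X))
                feature_x_handler(arg); /* constant-folded away when 0 */
        else
                printf("generic path: %d\n", arg);
}

int main(void)
{
        /* With optimization enabled, the dead call is eliminated, so no
         * definition of feature_x_handler() is needed at link time. */
        dispatch(42);
        return 0;
}
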
arch/powerpc/mm/pgtable_32.c
@@ -171,18 +171,3 @@ void mark_rodata_ro(void)
         ptdump_check_wx();
 }
 #endif
-
-#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
-void __kernel_map_pages(struct page *page, int numpages, int enable)
-{
-        unsigned long addr = (unsigned long)page_address(page);
-
-        if (PageHighMem(page))
-                return;
-
-        if (enable)
-                set_memory_p(addr, numpages);
-        else
-                set_memory_np(addr, numpages);
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
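
For caller-side context (not touched by this commit): generic mm code reaches this hook through the debug_pagealloc wrappers in include/linux/mm.h, which map pages on allocation and unmap them on free so that stray accesses to freed memory fault immediately. A simplified excerpt for illustration, which may differ in detail from any given kernel version:

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
        if (debug_pagealloc_enabled_static())
                __kernel_map_pages(page, numpages, 1);  /* enable = map */
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
        if (debug_pagealloc_enabled_static())
                __kernel_map_pages(page, numpages, 0);  /* disable = unmap */
}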