Commit f8b46c4b authored by Anshuman Khandual, committed by Will Deacon

arm64/mm: Add pud_sect_supported()

Section mapping at PUD level is supported only on 4K pages and currently it
gets verified with explicit #ifdef or IS_ENABLED() constructs. This adds a
new helper pud_sect_supported() for this purpose, which particularly cleans
up the HugeTLB code path. It updates relevant switch statements with checks
for __PAGETABLE_PMD_FOLDED in order to avoid build failures caused with two
identical switch case values in those code blocks.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/1632130171-472-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent e63cf610
...@@ -1022,6 +1022,11 @@ static inline pgprot_t arch_filter_pgprot(pgprot_t prot) ...@@ -1022,6 +1022,11 @@ static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
return PAGE_READONLY_EXEC; return PAGE_READONLY_EXEC;
} }
/*
 * pud_sect_supported - report whether PUD-level section (block) mappings
 * are available on this kernel configuration.
 *
 * Per the enclosing change, level-1 block mappings exist only when the
 * kernel runs with a 4K base page granule, so the check reduces to a
 * page-size comparison.
 */
static inline bool pud_sect_supported(void)
{
	return (PAGE_SIZE == SZ_4K);
}
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#define _ASM_ARM64_VMALLOC_H #define _ASM_ARM64_VMALLOC_H
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
...@@ -9,10 +10,9 @@ ...@@ -9,10 +10,9 @@
static inline bool arch_vmap_pud_supported(pgprot_t prot) static inline bool arch_vmap_pud_supported(pgprot_t prot)
{ {
/* /*
* Only 4k granule supports level 1 block mappings.
* SW table walks can't handle removal of intermediate entries. * SW table walks can't handle removal of intermediate entries.
*/ */
return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && return pud_sect_supported() &&
!IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
} }
......
...@@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void) ...@@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
{ {
int order; int order;
#ifdef CONFIG_ARM64_4K_PAGES if (pud_sect_supported())
order = PUD_SHIFT - PAGE_SHIFT; order = PUD_SHIFT - PAGE_SHIFT;
#else else
order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT; order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
#endif
/* /*
* HugeTLB CMA reservation is required for gigantic * HugeTLB CMA reservation is required for gigantic
* huge pages which could not be allocated via the * huge pages which could not be allocated via the
...@@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h) ...@@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
size_t pagesize = huge_page_size(h); size_t pagesize = huge_page_size(h);
switch (pagesize) { switch (pagesize) {
#ifdef CONFIG_ARM64_4K_PAGES #ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE: case PUD_SIZE:
return pud_sect_supported();
#endif #endif
case PMD_SIZE: case PMD_SIZE:
case CONT_PMD_SIZE: case CONT_PMD_SIZE:
...@@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) ...@@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = size; *pgsize = size;
switch (size) { switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES #ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE: case PUD_SIZE:
if (pud_sect_supported())
contig_ptes = 1;
break;
#endif #endif
case PMD_SIZE: case PMD_SIZE:
contig_ptes = 1; contig_ptes = 1;
...@@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma, ...@@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
static int __init hugetlbpage_init(void) static int __init hugetlbpage_init(void)
{ {
#ifdef CONFIG_ARM64_4K_PAGES if (pud_sect_supported())
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
#endif
hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
...@@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init); ...@@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
bool __init arch_hugetlb_valid_size(unsigned long size) bool __init arch_hugetlb_valid_size(unsigned long size)
{ {
switch (size) { switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES #ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE: case PUD_SIZE:
return pud_sect_supported();
#endif #endif
case CONT_PMD_SIZE: case CONT_PMD_SIZE:
case PMD_SIZE: case PMD_SIZE:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment