Commit ef26b76d authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA

Commit cf11e85f ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
added support for allocating gigantic hugepages using CMA. This patch enables the
same for powerpc.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200713150749.25245-1-aneesh.kumar@linux.ibm.com
parent 81a41325
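
For context: the generic side from cf11e85f sizes a CMA area from the hugetlb_cma= boot option, so the arch hook added here only has to pick the gigantic-page order and pass it down. A minimal userspace sketch of that contract, with stand-ins for the kernel pieces (everything in this sketch that is not in the diff below is hypothetical):

#include <stdio.h>

/* Stand-in for the generic mm/hugetlb.c entry point added by cf11e85f;
 * in the kernel it carves out the CMA area requested via hugetlb_cma=. */
static void hugetlb_cma_reserve(int order)
{
	printf("CMA reserved for order-%d gigantic pages\n", order);
}

/* Stand-in for the powerpc hook added by this patch: pick the order
 * for the MMU in use, then defer to the generic code. */
static void gigantic_hugetlb_cma_reserve(void)
{
	int order = 30 - 16;	/* PUD_SHIFT - PAGE_SHIFT, assuming radix with 64K pages */

	if (order)
		hugetlb_cma_reserve(order);
}

int main(void)
{
	/* setup_arch() now makes this call early in boot (see the diff below) */
	gigantic_hugetlb_cma_reserve();
	return 0;
}
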
arch/powerpc/include/asm/hugetlb.h

@@ -57,6 +57,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty);
 
+void gigantic_hugetlb_cma_reserve(void) __init;
 #include <asm-generic/hugetlb.h>
 
 #else /* ! CONFIG_HUGETLB_PAGE */
@@ -71,6 +72,12 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
 {
 	return NULL;
 }
 
+static inline void __init gigantic_hugetlb_cma_reserve(void)
+{
+}
+
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
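
The header change follows the usual Kconfig stub idiom: a real declaration when CONFIG_HUGETLB_PAGE is set, and an empty static inline otherwise, so setup_arch() can call the hook unconditionally with no #ifdef at the call site. Reduced to its shape, with hypothetical names:

/* Sketch of the stub idiom (CONFIG_FOO and foo_reserve are made up) */
#ifdef CONFIG_FOO
void foo_reserve(void);			/* real version, defined elsewhere */
#else
static inline void foo_reserve(void) { }	/* no-op when the feature is off */
#endif

int main(void)
{
	foo_reserve();	/* callers never need their own #ifdef */
	return 0;
}
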
arch/powerpc/kernel/setup-common.c

@@ -928,6 +928,9 @@ void __init setup_arch(char **cmdline_p)
 	/* Reserve large chunks of memory for use by CMA for KVM. */
 	kvm_cma_reserve();
 
+	/* Reserve large chunks of memory for use by CMA for hugetlb */
+	gigantic_hugetlb_cma_reserve();
+
 	klp_init_thread_info(&init_task);
 
 	init_mm.start_code = (unsigned long)_stext;
arch/powerpc/mm/hugetlbpage.c

@@ -684,3 +684,21 @@ void flush_dcache_icache_hugepage(struct page *page)
 		}
 	}
 }
+
+void __init gigantic_hugetlb_cma_reserve(void)
+{
+	unsigned long order = 0;
+
+	if (radix_enabled())
+		order = PUD_SHIFT - PAGE_SHIFT;
+	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
+		/*
+		 * On pseries, the ibm,expected#pages property is used to reserve 16G pages instead.
+		 */
+		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
+
+	if (order) {
+		VM_WARN_ON(order < MAX_ORDER);
+		hugetlb_cma_reserve(order);
+	}
+}
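
To put numbers on the two branches (assuming a 64K base page, PAGE_SHIFT = 16; the shifts below are assumptions for illustration, not part of the diff): on radix, PUD_SHIFT is 30, so order = 14 and a gigantic page is 2^14 * 64K = 1G; on bare-metal hash with 16G pages the shift is 34, giving order 18. A quick check:

#include <stdio.h>

int main(void)
{
	/* Assumed powerpc values with 64K base pages (not from the diff) */
	const unsigned int page_shift = 16;		/* 64K base pages */
	const unsigned int radix_pud_shift = 30;	/* a PUD entry maps 1G */
	const unsigned int hash_16g_shift = 34;		/* 16G hash hugepages */

	/* order = page-table-level shift minus base page shift */
	printf("radix: order %u -> %lu MB gigantic pages\n",
	       radix_pud_shift - page_shift, (1UL << radix_pud_shift) >> 20);
	printf("hash : order %u -> %lu MB gigantic pages\n",
	       hash_16g_shift - page_shift, (1UL << hash_16g_shift) >> 20);
	return 0;
}

Either order is well above MAX_ORDER, which is what the VM_WARN_ON() asserts: pages this large can only come from CMA or boot-time reservation, never from the buddy allocator.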