Commit d4870b89 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/8xx: Only 8M pages are hugepte pages now

512k pages are now standard pages, so only 8M pages
are hugepte pages.

Normal page tables no longer need to be handled through hugepd
allocation and freeing, and the hugepte helpers can also be
simplified.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/2c6135d57fb76eebf70673fbac3dc9e740767879.1589866984.git.christophe.leroy@csgroup.eu
parent b250c8c0
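As background for the diff below (not part of the commit): with the 8xx page-size bits _PMD_PAGE_512K = 0x0004 and _PMD_PAGE_8M = 0x000c (so _PMD_PAGE_MASK = 0x000c), the old formula ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17 decoded to 19 for 512k entries and 23 for 8M entries; once only 8M pages reach the hugepd path, the result is always PAGE_SHIFT_8M. Likewise, 8xx hugepd entries sit at PGD level, where pdshift is 22, so (1UL << pdshift) - 1 is just SZ_4M - 1. The following is a minimal standalone C sketch of both identities; the constant values are assumptions copied here for illustration, not definitions taken from the patch itself.

/*
 * Standalone sketch (not kernel code, not part of the patch): checks the
 * two arithmetic identities behind the simplified 8xx hugepte helpers.
 * The constants below are assumed values mirroring the 8xx headers.
 */
#include <assert.h>
#include <stdio.h>

#define _PMD_PAGE_MASK	0x000cUL
#define _PMD_PAGE_8M	0x000cUL
#define _PMD_PAGE_512K	0x0004UL
#define PAGE_SHIFT	12
#define PAGE_SHIFT_8M	23
#define PGDIR_SHIFT	22		/* assumed: 8xx hugepd entries live at PGD level */
#define SZ_4M		0x400000UL

/* Old decoding of the page-size bits stored in a hugepd entry. */
static unsigned int old_hugepd_shift(unsigned long hpd_val)
{
	return ((hpd_val & _PMD_PAGE_MASK) >> 1) + 17;
}

int main(void)
{
	unsigned long addr = 0x12345678UL;

	/* 512k entries decoded to shift 19, 8M entries to 23.  With 512k
	 * pages handled as standard pages, only the 8M case remains, so
	 * hugepd_shift() can simply return PAGE_SHIFT_8M. */
	printf("512K -> %u, 8M -> %u\n",
	       old_hugepd_shift(_PMD_PAGE_512K), old_hugepd_shift(_PMD_PAGE_8M));
	assert(old_hugepd_shift(_PMD_PAGE_8M) == PAGE_SHIFT_8M);

	/* hugepte_offset(): pdshift is always PGDIR_SHIFT here, and
	 * 1UL << PGDIR_SHIFT == SZ_4M, so both index formulas agree. */
	assert(((addr & ((1UL << PGDIR_SHIFT) - 1)) >> PAGE_SHIFT) ==
	       ((addr & (SZ_4M - 1)) >> PAGE_SHIFT));
	return 0;
}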
@@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
-	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+	return PAGE_SHIFT_8M;
 }
 
 static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
 				    unsigned int pdshift)
 {
-	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+	unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
 
 	return hugepd_page(hpd) + idx;
 }
@@ -32,8 +32,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 
 static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
 {
-	*hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
-			 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+	*hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
 }
 
 static inline int check_and_get_huge_psize(int shift)
@@ -54,23 +54,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (pshift >= pdshift) {
 		cachep = PGT_CACHE(PTE_T_ORDER);
 		num_hugepd = 1 << (pshift - pdshift);
-		new = NULL;
-	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
-		cachep = NULL;
-		num_hugepd = 1;
-		new = pte_alloc_one(mm);
 	} else {
 		cachep = PGT_CACHE(pdshift - pshift);
 		num_hugepd = 1;
-		new = NULL;
 	}
 
-	if (!cachep && !new) {
+	if (!cachep) {
 		WARN_ONCE(1, "No page table cache created for hugetlb tables");
 		return -ENOMEM;
 	}
 
-	if (cachep)
-		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
@@ -102,10 +95,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (i < num_hugepd) {
 		for (i = i - 1 ; i >= 0; i--, hpdp--)
 			*hpdp = __hugepd(0);
-		if (cachep)
-			kmem_cache_free(cachep, new);
-		else
-			pte_free(mm, new);
+		kmem_cache_free(cachep, new);
 	} else {
 		kmemleak_ignore(new);
 	}