Commit a1cd5419 authored by Becky Bruce, committed by Benjamin Herrenschmidt

powerpc: Update hugetlb huge_pte_alloc and tablewalk code for FSL BOOKE

This updates the hugetlb page table code to handle 64-bit FSL_BOOKE.
The previous 32-bit work counted on the inner levels of the page table
collapsing.
Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 8c1674de
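
As context for the new HUGEPD_PGD_SHIFT/HUGEPD_PUD_SHIFT macros in the first hunk below: on 64-bit FSL_BOOK3E the hugepage directory pointer (hpdp) lives one level higher in the tree than on the other configurations, so huge_pte_alloc() compares the huge-page shift against a per-config threshold rather than against PUD_SHIFT/PMD_SHIFT directly. The following standalone sketch mirrors only that selection cascade; the shift values and the hugepd_level() helper are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Placeholder shift values; the kernel's come from its pgtable headers
 * and depend on the configured page size. */
#define PGDIR_SHIFT     37
#define PUD_SHIFT       30
#define PMD_SHIFT       21

#ifdef CONFIG_PPC_FSL_BOOK3E            /* hugepd sits one level higher */
#define HUGEPD_PGD_SHIFT        PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT        PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT        PUD_SHIFT
#define HUGEPD_PUD_SHIFT        PMD_SHIFT
#endif

/* Mirrors the cascade in huge_pte_alloc(): the hugepd is installed at
 * the highest level whose threshold the huge-page shift reaches. */
static const char *hugepd_level(unsigned int pshift)
{
        if (pshift >= HUGEPD_PGD_SHIFT)
                return "pgd";
        if (pshift >= HUGEPD_PUD_SHIFT)
                return "pud";
        return "pmd";
}

int main(void)
{
        const unsigned int shifts[] = { 22, 24, 30, 34 };       /* 4MB..16GB */

        for (unsigned int i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
                printf("shift %2u -> hugepd at %s level\n",
                       shifts[i], hugepd_level(shifts[i]));
        return 0;
}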
@@ -155,11 +155,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		hpdp->pd = 0;
 		kmem_cache_free(cachep, new);
 	}
+#else
+	if (!hugepd_none(*hpdp))
+		kmem_cache_free(cachep, new);
+	else
+		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 #endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
 
+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT	PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT	PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT	PUD_SHIFT
+#define HUGEPD_PUD_SHIFT	PMD_SHIFT
+#endif
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
@@ -172,12 +189,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	addr &= ~(sz-1);
 
 	pg = pgd_offset(mm, addr);
-	if (pshift >= PUD_SHIFT) {
+
+	if (pshift >= HUGEPD_PGD_SHIFT) {
 		hpdp = (hugepd_t *)pg;
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
-		if (pshift >= PMD_SHIFT) {
+		if (pshift >= HUGEPD_PUD_SHIFT) {
 			hpdp = (hugepd_t *)pu;
 		} else {
 			pdshift = PMD_SHIFT;
@@ -453,14 +471,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	unsigned long start;
 
 	start = addr;
-	pmd = pmd_offset(pud, addr);
 	do {
+		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd))
 			continue;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+		/*
+		 * Increment next by the size of the huge mapping since
+		 * there may be more than one entry at this level for a
+		 * single hugepage, but all of them point to
+		 * the same kmem cache that holds the hugepte.
+		 */
+		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
 		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 				  addr, next, floor, ceiling);
-	} while (pmd++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PUD_MASK;
 	if (start < floor)
@@ -487,8 +514,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	unsigned long start;
 
 	start = addr;
-	pud = pud_offset(pgd, addr);
 	do {
+		pud = pud_offset(pgd, addr);
 		next = pud_addr_end(addr, end);
 		if (!is_hugepd(pud)) {
 			if (pud_none_or_clear_bad(pud))
@@ -496,10 +523,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 					       ceiling);
 		} else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+			/*
+			 * Increment next by the size of the huge mapping since
+			 * there may be more than one entry at this level for a
+			 * single hugepage, but all of them point to
+			 * the same kmem cache that holds the hugepte.
+			 */
+			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 					  addr, next, floor, ceiling);
 		}
-	} while (pud++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PGDIR_MASK;
 	if (start < floor)
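
For readers without the tree at hand, here is a hedged standalone sketch of the two ideas the FSL_BOOK3E paths above depend on (assuming a 64-bit build; every constant and helper below is invented for illustration, with only the packing expression and the next = addr + (1 << hugepd_shift(...)) skip taken from the patch): the hugepd packs the hugepte kmem-cache pointer together with the page-size shift, and since several pmd/pud slots can carry the same hugepd, the free loops advance next by the full mapping size so the shared hugepte object is freed once per mapping rather than once per slot.

#include <assert.h>
#include <stdio.h>

/* Illustrative values only; the real ones live in the powerpc headers. */
#define PD_HUGE         0x8000000000000000UL
#define PMD_SHIFT       21              /* one pmd slot spans 2MB in this toy */
#define HPAGE_SHIFT     24              /* a 16MB huge page */
#define SHIFT_MASK      0x3fUL          /* low bits holding the page shift */

typedef struct { unsigned long pd; } hugepd_t;

/* Pack an aligned hugepte-cache pointer with the page shift, mirroring
 * hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift from the patch. */
static hugepd_t hugepd_pack(void *hugepte, unsigned int pshift)
{
        return (hugepd_t){ ((unsigned long)hugepte & ~PD_HUGE) | pshift };
}

/* Recover the shift; stands in for the kernel's hugepd_shift(). */
static unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & SHIFT_MASK;
}

int main(void)
{
        /* 64-byte alignment keeps the pointer's low bits clear of the shift. */
        static _Alignas(64) unsigned long fake_hugepte[8];
        hugepd_t pmd_slots[8];          /* 8 x 2MB slots = one 16MB page */
        unsigned long addr = 0, end = 1UL << HPAGE_SHIFT, next;
        int frees = 0;

        /* Every pmd slot covering the huge page carries the same hugepd. */
        for (int i = 0; i < 8; i++)
                pmd_slots[i] = hugepd_pack(fake_hugepte, HPAGE_SHIFT);

        do {
                hugepd_t *hpdp = &pmd_slots[addr >> PMD_SHIFT];
                /* The patch's skip: advance by the whole mapping, not one slot. */
                next = addr + (1UL << hugepd_shift(*hpdp));
                frees++;                /* one free_hugepd_range() call */
        } while ((addr = next) != end);

        assert(frees == 1);     /* freed once despite 8 referencing slots */
        printf("huge mapping freed %d time(s)\n", frees);
        return 0;
}

This is also why the pmd++/pud++ increments were dropped from the loop conditions in the patch: after a skip, the correct slot is recomputed from the advanced address by pmd_offset()/pud_offset() at the top of each iteration.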