Commit 0b1c524c authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32: refactor pmd_offset(pud_offset(pgd_offset...

In several places, the pmd pointer is retrieved through the same chain of calls:

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

or

	pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

Refactor this by implementing two helpers: pmd_ptr(), which walks the
page tables of a given mm, and pmd_ptr_k(), which walks the kernel
(init_mm) page tables.

This will help when adding the p4d level.
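For instance, once a p4d level exists, only the two helpers need to gain
the extra lookup while every caller stays unchanged. A hypothetical
sketch of the kernel variant (assuming the generic p4d_offset() helper;
not part of this commit):

	/* sketch only: p4d is not yet part of the powerpc/32 walk */
	static inline pmd_t *pmd_ptr_k(unsigned long va)
	{
		return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
	}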
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/7b065c5be35726af4066cab238ee35cabceda1fa.1578558199.git.christophe.leroy@c-s.fr
parent 05642cf7
@@ -41,6 +41,18 @@ struct mm_struct;
 #ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC32
+static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
+{
+	return pmd_offset(pud_offset(pgd_offset(mm, va), va), va);
+}
+
+static inline pmd_t *pmd_ptr_k(unsigned long va)
+{
+	return pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+}
+#endif
+
 #include <asm/tlbflush.h>
 
 /* Keep these as a macros to avoid include dependency mess */
...
@@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
 	if (!Hash)
 		return;
-	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
+	pmd = pmd_ptr(mm, ea);
 	if (!pmd_none(*pmd))
 		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
...
@@ -90,7 +90,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	if (start >= end)
 		return;
 	end = (end - 1) | ~PAGE_MASK;
-	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
+	pmd = pmd_ptr(mm, start);
 	for (;;) {
 		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
 		if (pmd_end > end)
@@ -148,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 		return;
 	}
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
-	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
+	pmd = pmd_ptr(mm, vmaddr);
 	if (!pmd_none(*pmd))
 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
...
@@ -36,7 +36,7 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
 	unsigned long k_cur, k_next;
 	pte_t *new = NULL;
 
-	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
+	pmd = pmd_ptr_k(k_start);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
 		k_next = pgd_addr_end(k_cur, k_end);
@@ -78,7 +78,7 @@ static int __init kasan_init_region(void *start, size_t size)
 	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+		pmd_t *pmd = pmd_ptr_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
@@ -102,7 +102,7 @@ static void __init kasan_remap_early_shadow_ro(void)
 	kasan_populate_pte(kasan_early_shadow_pte, prot);
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+		pmd_t *pmd = pmd_ptr_k(k_cur);
 		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
 
 		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@@ -202,7 +202,7 @@ void __init kasan_early_init(void)
 	unsigned long addr = KASAN_SHADOW_START;
 	unsigned long end = KASAN_SHADOW_END;
 	unsigned long next;
-	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
+	pmd_t *pmd = pmd_ptr_k(addr);
 
 	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
...
@@ -69,8 +69,7 @@ EXPORT_SYMBOL(kmap_prot);
 static inline pte_t *virt_to_kpte(unsigned long vaddr)
 {
-	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
-			vaddr), vaddr), vaddr);
+	return pte_offset_kernel(pmd_ptr_k(vaddr), vaddr);
 }
 #endif
...
@@ -104,7 +104,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
 
-		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+		pmdp = pmd_ptr_k(v);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
@@ -119,7 +119,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
 
-		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+		pmdp = pmd_ptr_k(v);
 		*pmdp = __pmd(val);
 
 		v += LARGE_PAGE_SIZE_4M;
...
@@ -63,7 +63,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+	pd = pmd_ptr_k(va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	if (likely(slab_is_available()))
 		pg = pte_alloc_kernel(pd, va);
...