Commit 0c22e4b2 authored by Christophe Leroy, committed by Andrew Morton

powerpc/mm: remove hugepd leftovers

All targets have now opted out of CONFIG_ARCH_HAS_HUGEPD, so remove the leftover code.

Link: https://lkml.kernel.org/r/39c0d0adee6790fc42cee9f458e05fb95136c3dd.1719928057.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Oscar Salvador <osalvador@suse.de>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 57fb15c3
@@ -30,13 +30,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 }
 #define is_hugepage_only_range is_hugepage_only_range
 
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
-void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
-			    unsigned long end, unsigned long floor,
-			    unsigned long ceiling);
-#endif
-
 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		     pte_t pte, unsigned long sz);
@@ -269,12 +269,6 @@ static inline const void *pfn_to_kaddr(unsigned long pfn)
 #define is_kernel_addr(x)	((x) >= TASK_SIZE)
 #endif
 
-/*
- * Some number of bits at the level of the page table that points to
- * a hugepte are used to encode the size.  This masks those bits.
- */
-#define HUGEPD_SHIFT_MASK	0x3f
-
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_PPC_BOOK3S_64
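For context on the encoding being removed: the deleted comment describes a hugepd entry as a table pointer whose low bits carry the page-size shift, with HUGEPD_SHIFT_MASK (0x3f) masking those bits off. The following is a minimal userspace sketch of that packing scheme, not the kernel code; make_hugepd() and hugepd_table() are hypothetical helper names, and the real per-platform encodings differed in detail.

```c
#include <stdint.h>
#include <stdio.h>

#define HUGEPD_SHIFT_MASK 0x3f	/* low 6 bits encode the shift */

/* Hypothetical reconstruction: fuse a table pointer (aligned so its
 * low 6 bits are zero) with a page-size shift in those low bits. */
static uint64_t make_hugepd(uint64_t table, unsigned int shift)
{
	return table | (shift & HUGEPD_SHIFT_MASK);
}

static unsigned int hugepd_shift(uint64_t hpd)
{
	return hpd & HUGEPD_SHIFT_MASK;			/* recover the shift */
}

static uint64_t hugepd_table(uint64_t hpd)
{
	return hpd & ~(uint64_t)HUGEPD_SHIFT_MASK;	/* strip the shift bits */
}

int main(void)
{
	uint64_t hpd = make_hugepd(0xc000000001234000ULL, 24);	/* e.g. 16M page */

	printf("shift=%u table=%#llx\n", hugepd_shift(hpd),
	       (unsigned long long)hugepd_table(hpd));
	return 0;
}
```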
@@ -101,14 +101,4 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
 	return pmd_raw(old) == prev;
 }
 
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-typedef struct { __be64 pdbe; } hugepd_t;
-#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
-
-static inline unsigned long hpd_val(hugepd_t x)
-{
-	return be64_to_cpu(x.pdbe);
-}
-#endif
-
 #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
@@ -87,13 +87,4 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 }
 #endif
 
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-typedef struct { unsigned long pd; } hugepd_t;
-#define __hugepd(x) ((hugepd_t) { (x) })
-static inline unsigned long hpd_val(hugepd_t x)
-{
-	return x.pd;
-}
-#endif
-
 #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
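The two removed typedefs were the big-endian and native flavours of the same opaque wrapper: __hugepd() boxes a raw value into a hugepd_t and hpd_val() unboxes it, byte-swapping only in the BE variant. A standalone sketch of the BE round-trip, using glibc's htobe64/be64toh in place of the kernel's cpu_to_be64/be64_to_cpu:

```c
#include <endian.h>
#include <stdint.h>
#include <assert.h>

/* Userspace stand-ins mirroring the removed kernel wrappers. */
typedef struct { uint64_t pdbe; } hugepd_t;	/* stored big-endian */

static hugepd_t __hugepd(uint64_t x)
{
	return (hugepd_t){ htobe64(x) };	/* kernel: cpu_to_be64() */
}

static uint64_t hpd_val(hugepd_t x)
{
	return be64toh(x.pdbe);			/* kernel: be64_to_cpu() */
}

int main(void)
{
	/* The wrapper is opaque: whatever goes in via __hugepd()
	 * comes back out of hpd_val() unchanged, regardless of the
	 * host byte order. */
	assert(hpd_val(__hugepd(0xdeadbeef12345678ULL)) == 0xdeadbeef12345678ULL);
	return 0;
}
```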
(One file's diff is collapsed and not shown here.)
@@ -120,12 +120,8 @@ void pgtable_cache_add(unsigned int shift)
 	/* When batching pgtable pointers for RCU freeing, we store
 	 * the index size in the low bits.  Table alignment must be
 	 * big enough to fit it.
-	 *
-	 * Likewise, hugeapge pagetable pointers contain a (different)
-	 * shift value in the low bits.  All tables must be aligned so
-	 * as to leave enough 0 bits in the address to contain it. */
-	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
-				     HUGEPD_SHIFT_MASK + 1);
+	 */
+	unsigned long minalign = MAX_PGTABLE_INDEX_SIZE + 1;
 	struct kmem_cache *new = NULL;
 
 	/* It would be nice if this was a BUILD_BUG_ON(), but at the
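The alignment requirement exists because pgtable_cache_add() batches table pointers for RCU freeing and tucks the index size into the low bits of each pointer, which only works if the allocation's alignment guarantees those bits are zero. With hugepd gone, the HUGEPD_SHIFT_MASK + 1 term in the max() is no longer needed. A sketch of the pointer-tagging idea follows; the MAX_PGTABLE_INDEX_SIZE value is assumed for illustration and tag_table() is a hypothetical helper.

```c
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* assumed value, for illustration */

/* Store an index size in the low bits of a table pointer.  Valid only
 * when the allocation is aligned to MAX_PGTABLE_INDEX_SIZE + 1, so the
 * low bits of the address are known to be zero. */
static uintptr_t tag_table(void *table, unsigned int index_size)
{
	return (uintptr_t)table | index_size;
}

int main(void)
{
	unsigned long minalign = MAX_PGTABLE_INDEX_SIZE + 1;
	void *table = aligned_alloc(minalign, 256);
	uintptr_t tagged = tag_table(table, 9);

	/* Unpacking: the mask recovers the pointer, the low bits the size. */
	printf("table=%p index_size=%lu\n",
	       (void *)(tagged & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE),
	       (unsigned long)(tagged & MAX_PGTABLE_INDEX_SIZE));
	free(table);
	return 0;
}
```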
@@ -409,11 +409,10 @@ unsigned long vmalloc_to_phys(void *va)
 EXPORT_SYMBOL_GPL(vmalloc_to_phys);
 
 /*
- * We have 4 cases for pgds and pmds:
+ * We have 3 cases for pgds and pmds:
  * (1) invalid (all zeroes)
  * (2) pointer to next table, as normal; bottom 6 bits == 0
  * (3) leaf pte for huge page _PAGE_PTE set
- * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
  *
  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the page and take a ref on it.
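The walker relies on classifying an entry from its raw value alone, and dropping case (4) leaves a clean three-way split. A sketch of how the remaining cases might be told apart; the _PAGE_PTE bit position and the simplified 64-bit entry type are assumptions for illustration, not the kernel's definitions.

```c
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PTE (1ULL << 62)	/* assumed bit position, for illustration */

enum entry_kind { ENTRY_INVALID, ENTRY_TABLE, ENTRY_LEAF };

/* Classify a pgd/pmd value per the three cases in the comment above:
 * all zeroes -> invalid; _PAGE_PTE set -> huge-page leaf;
 * otherwise -> pointer to the next table level. */
static enum entry_kind classify(uint64_t val)
{
	if (!val)
		return ENTRY_INVALID;
	if (val & _PAGE_PTE)
		return ENTRY_LEAF;
	return ENTRY_TABLE;
}

int main(void)
{
	printf("%d %d %d\n", classify(0),
	       classify(_PAGE_PTE | 0x1000),
	       classify(0x1000));
	return 0;
}
```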
@@ -430,7 +429,6 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 #endif
 	pmd_t pmd, *pmdp;
 	pte_t *ret_pte;
-	hugepd_t *hpdp = NULL;
 	unsigned pdshift;
 
 	if (hpage_shift)
@@ -463,11 +461,6 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		goto out;
 	}
 
-	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
-		hpdp = (hugepd_t *)&p4d;
-		goto out_huge;
-	}
-
 	/*
 	 * Even if we end up with an unmap, the pgtable will not
 	 * be freed, because we do an rcu free and here we are
@@ -485,11 +478,6 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		goto out;
 	}
 
-	if (is_hugepd(__hugepd(pud_val(pud)))) {
-		hpdp = (hugepd_t *)&pud;
-		goto out_huge;
-	}
-
 	pmdp = pmd_offset(&pud, ea);
 #else
 	pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea);
@@ -527,21 +515,8 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		goto out;
 	}
 
-	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
-		hpdp = (hugepd_t *)&pmd;
-		goto out_huge;
-	}
-
 	return pte_offset_kernel(&pmd, ea);
 
-out_huge:
-	if (!hpdp)
-		return NULL;
-
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
-	pdshift = hugepd_shift(*hpdp);
-#endif
 out:
 	if (hpage_shift)
 		*hpage_shift = pdshift;
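With the hugepd detours and the out_huge label gone, __find_linux_pte() reduces to a straight descent that stops early whenever a level turns out to be a leaf. A rough userspace sketch of the remaining control flow; the types, find_pte() helper, and level shifts below are simplified stand-ins for illustration, not the kernel API.

```c
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for a page-table entry at any level. */
struct entry { struct entry *next; bool leaf; };

/* Descend pgd -> p4d -> pud -> pmd -> pte, returning the entry found
 * and, via *shift, the level shift if it was a huge-page leaf
 * (0 means a normal pte-level entry). */
static struct entry *find_pte(struct entry *pgd, unsigned int *shift,
			      const unsigned int level_shift[4])
{
	struct entry *e = pgd;

	for (int level = 0; level < 4; level++) {
		if (!e)
			return NULL;			/* case (1): invalid */
		if (e->leaf) {				/* case (3): huge-page leaf */
			if (shift)
				*shift = level_shift[level];
			return e;
		}
		e = e->next;				/* case (2): next table */
	}
	if (shift)
		*shift = 0;
	return e;					/* normal pte level */
}

int main(void)
{
	struct entry pte = { NULL, false };
	struct entry pmd = { &pte, false };
	struct entry pud = { &pmd, false };
	struct entry p4d = { &pud, false };
	struct entry pgd = { &p4d, false };
	/* Illustrative level shifts only; real values are per-platform. */
	static const unsigned int level_shift[4] = { 48, 39, 30, 21 };
	unsigned int shift;

	return find_pte(&pgd, &shift, level_shift) == &pte ? 0 : 1;
}
```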