Commit cf9427b8 authored by Aneesh Kumar K.V, committed by Benjamin Herrenschmidt

powerpc: New hugepage directory format

Change the hugepage directory format so that we can have leaf ptes directly
at page directory avoiding the allocation of hugepage directory.

With the new table format we have 3 cases for pgds and pmds:
(1) invalid (all zeroes)
(2) pointer to next table, as normal; bottom 6 bits == 0
(3) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table

Instead of storing the shift value in the hugepd pointer, we use the
mmu_psize_def index so that all the supported hugepage sizes fit in 4 bits.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 0e5f35d0
...@@ -6,6 +6,33 @@ ...@@ -6,6 +6,33 @@
extern struct kmem_cache *hugepte_cache; extern struct kmem_cache *hugepte_cache;
#ifdef CONFIG_PPC_BOOK3S_64
/*
* This should work for other subarchs too. But right now we use the
* new format only for 64bit book3s
*/
/*
 * Extract the pointer to the huge-PTE table from a Book3S-64 hugepd
 * entry: the low HUGEPD_SHIFT_MASK bits carry the encoded page size,
 * the bits above them are the table address.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	unsigned long table;

	/* All supported MMU page sizes must fit in the 4 encoding bits. */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);

	BUG_ON(!hugepd_ok(hpd));
	table = hpd.pd & ~HUGEPD_SHIFT_MASK;
	return (pte_t *)table;
}
/*
 * Decode the mmu_psize_defs[] index stored in bits 2..5 of a
 * Book3S-64 hugepd entry.
 */
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	unsigned long size_field = hpd.pd & HUGEPD_SHIFT_MASK;

	return size_field >> 2;
}
/*
 * Page-size shift of the hugepages mapped by this hugepd entry,
 * obtained via the encoded mmu_psize_defs[] index.
 */
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	unsigned int psize = hugepd_mmu_psize(hpd);

	return mmu_psize_to_shift(psize);
}
#else
static inline pte_t *hugepd_page(hugepd_t hpd) static inline pte_t *hugepd_page(hugepd_t hpd)
{ {
BUG_ON(!hugepd_ok(hpd)); BUG_ON(!hugepd_ok(hpd));
...@@ -17,6 +44,9 @@ static inline unsigned int hugepd_shift(hugepd_t hpd) ...@@ -17,6 +44,9 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
return hpd.pd & HUGEPD_SHIFT_MASK; return hpd.pd & HUGEPD_SHIFT_MASK;
} }
#endif /* CONFIG_PPC_BOOK3S_64 */
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
unsigned pdshift) unsigned pdshift)
{ {
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
* complete pgtable.h but only a portion of it. * complete pgtable.h but only a portion of it.
*/ */
#include <asm/pgtable-ppc64.h> #include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
/* /*
* Segment table * Segment table
...@@ -159,6 +160,24 @@ struct mmu_psize_def ...@@ -159,6 +160,24 @@ struct mmu_psize_def
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
}; };
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
/*
 * Map a page-size shift back to its mmu_psize_defs[] index, or -1 if
 * no supported page size uses that shift.
 */
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int idx = 0;

	while (idx < MMU_PAGE_COUNT) {
		if (mmu_psize_defs[idx].shift == shift)
			return idx;
		idx++;
	}
	return -1;
}
/*
 * Page-size shift for a given mmu_psize_defs[] index. A zero shift
 * means the size is not configured on this machine, which is a
 * caller bug.
 */
static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	unsigned int shift = mmu_psize_defs[mmu_psize].shift;

	if (!shift)
		BUG();
	return shift;
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
...@@ -193,7 +212,6 @@ static inline int segment_shift(int ssize) ...@@ -193,7 +212,6 @@ static inline int segment_shift(int ssize)
/* /*
* The current system page and segment sizes * The current system page and segment sizes
*/ */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize; extern int mmu_linear_psize;
extern int mmu_virtual_psize; extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize; extern int mmu_vmalloc_psize;
......
...@@ -249,6 +249,7 @@ extern long long virt_phys_offset; ...@@ -249,6 +249,7 @@ extern long long virt_phys_offset;
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET) #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#endif #endif
#ifndef CONFIG_PPC_BOOK3S_64
/* /*
* Use the top bit of the higher-level page table entries to indicate whether * Use the top bit of the higher-level page table entries to indicate whether
* the entries we point to contain hugepages. This works because we know that * the entries we point to contain hugepages. This works because we know that
...@@ -260,6 +261,7 @@ extern long long virt_phys_offset; ...@@ -260,6 +261,7 @@ extern long long virt_phys_offset;
#else #else
#define PD_HUGE 0x80000000 #define PD_HUGE 0x80000000
#endif #endif
#endif /* CONFIG_PPC_BOOK3S_64 */
/* /*
* Some number of bits at the level of the page table that points to * Some number of bits at the level of the page table that points to
...@@ -354,10 +356,21 @@ typedef unsigned long pgprot_t; ...@@ -354,10 +356,21 @@ typedef unsigned long pgprot_t;
typedef struct { signed long pd; } hugepd_t; typedef struct { signed long pd; } hugepd_t;
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * A Book3S-64 hugepd entry is valid when the bottom two bits are 00
 * (distinguishing it from a normal table pointer) and the size field
 * within HUGEPD_SHIFT_MASK is non-zero.
 */
static inline int hugepd_ok(hugepd_t hpd)
{
	unsigned long pd = hpd.pd;

	if (pd & 0x3)
		return 0;
	return (pd & HUGEPD_SHIFT_MASK) != 0;
}
#else
static inline int hugepd_ok(hugepd_t hpd) static inline int hugepd_ok(hugepd_t hpd)
{ {
return (hpd.pd > 0); return (hpd.pd > 0);
} }
#endif
#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep)))) #define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
#else /* CONFIG_HUGETLB_PAGE */ #else /* CONFIG_HUGETLB_PAGE */
......
...@@ -35,7 +35,10 @@ struct vmemmap_backing { ...@@ -35,7 +35,10 @@ struct vmemmap_backing {
#define MAX_PGTABLE_INDEX_SIZE 0xf #define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[]; extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) (pgtable_cache[(shift)-1]) #define PGT_CACHE(shift) ({ \
BUG_ON(!(shift)); \
pgtable_cache[(shift) - 1]; \
})
static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
......
...@@ -48,23 +48,6 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES]; ...@@ -48,23 +48,6 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages; static unsigned nr_gpages;
#endif #endif
static inline int shift_to_mmu_psize(unsigned int shift)
{
int psize;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
if (mmu_psize_defs[psize].shift == shift)
return psize;
return -1;
}
static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
if (mmu_psize_defs[mmu_psize].shift)
return mmu_psize_defs[mmu_psize].shift;
BUG();
}
#define hugepd_none(hpd) ((hpd).pd == 0) #define hugepd_none(hpd) ((hpd).pd == 0)
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift) pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
...@@ -145,6 +128,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -145,6 +128,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (unlikely(!hugepd_none(*hpdp))) if (unlikely(!hugepd_none(*hpdp)))
break; break;
else else
/* We use the old format for PPC_FSL_BOOK3E */
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
} }
/* If we bailed from the for loop early, an error occurred, clean up */ /* If we bailed from the for loop early, an error occurred, clean up */
...@@ -156,8 +140,14 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -156,8 +140,14 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#else #else
if (!hugepd_none(*hpdp)) if (!hugepd_none(*hpdp))
kmem_cache_free(cachep, new); kmem_cache_free(cachep, new);
else else {
#ifdef CONFIG_PPC_BOOK3S_64
hpdp->pd = (unsigned long)new |
(shift_to_mmu_psize(pshift) << 2);
#else
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
}
#endif #endif
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 0; return 0;
......
...@@ -129,8 +129,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) ...@@ -129,8 +129,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign); align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor); new = kmem_cache_create(name, table_size, align, 0, ctor);
PGT_CACHE(shift) = new; pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift); pr_debug("Allocated pgtable cache for order %d\n", shift);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment