Commit faa8bf88 authored by Benjamin Herrenschmidt

Merge branch 'booke-hugetlb' into next

parents 48b1bf86 1f6820b4
@@ -155,6 +155,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
...
@@ -81,6 +81,7 @@ CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAC_PARTITION=y
...
@@ -182,6 +182,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_ADFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_HFS_FS=m
...
@@ -183,6 +183,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_ADFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_HFS_FS=m
...
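All four defconfig hunks above make the same change: they turn on CONFIG_HUGETLBFS for the embedded Freescale targets so the Book3E hugetlb support added below is actually reachable from userspace. As a quick illustration of what this enables, a minimal userspace sketch follows; the /mnt/huge mount point and the 4M page size are illustrative assumptions, not anything fixed by this commit:

/* Minimal hugetlbfs smoke test.  Assumes something like
 *   mount -t hugetlbfs none /mnt/huge
 * has been done, and that the platform offers 4M huge pages.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(4UL << 20)	/* assumed huge page size */

int main(void)
{
	int fd = open("/mnt/huge/test", O_CREAT | O_RDWR, 0600);
	if (fd < 0)
		return 1;

	/* a shared mapping of a hugetlbfs file is backed by huge pages */
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;	/* touching it faults in one huge page */

	munmap(p, HPAGE_SIZE);
	close(fd);
	unlink("/mnt/huge/test");
	return 0;
}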
@@ -5,7 +5,6 @@
 #include <asm/page.h>

 extern struct kmem_cache *hugepte_cache;
-extern void __init reserve_hugetlb_gpages(void);

 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
@@ -22,14 +21,14 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
 				    unsigned pdshift)
 {
 	/*
-	 * On 32-bit, we have multiple higher-level table entries that point to
-	 * the same hugepte.  Just use the first one since they're all
+	 * On FSL BookE, we have multiple higher-level table entries that
+	 * point to the same hugepte.  Just use the first one since they're all
 	 * identical.  So for that case, idx=0.
 	 */
 	unsigned long idx = 0;
 	pte_t *dir = hugepd_page(*hpdp);
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC_FSL_BOOK3E
 	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
 #endif
@@ -53,7 +52,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 }
 #endif

-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte);
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
@@ -124,7 +124,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
+#ifdef HUGETLB_NEED_PRELOAD
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry.  Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
 	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
 }

 static inline pte_t huge_ptep_get(pte_t *ptep)
@@ -142,14 +152,24 @@ static inline void arch_release_hugepage(struct page *page)
 }

 #else /* ! CONFIG_HUGETLB_PAGE */
-static inline void reserve_hugetlb_gpages(void)
-{
-	pr_err("Cannot reserve gpages without hugetlb enabled\n");
-}
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 				      unsigned long vmaddr)
 {
 }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
 #endif

 #endif /* _ASM_POWERPC_HUGETLB_H */
@@ -258,6 +258,13 @@ extern int mmu_vmemmap_psize;

 #ifdef CONFIG_PPC64
 extern unsigned long linear_map_top;
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
 #endif

 #endif /* !__ASSEMBLY__ */
...
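The two hunks above work as a pair: huge_ptep_set_access_flags() returns 1 whenever HUGETLB_NEED_PRELOAD is defined, and mmu-book3e.h defines it for 64-bit Book3E. The generic MM code treats that nonzero return as "the MMU cache must be refreshed"; roughly, the caller in mm/hugetlb.c of this era looks like the sketch below (a paraphrase for context, not part of this diff):

	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

update_mmu_cache() in turn reaches book3e_hugetlb_preload() (see the mem.c hunk further down), which writes the TLB entry that the bolted miss handler deliberately does not.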
@@ -130,7 +130,9 @@ do { \

 #ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif

 #endif /* !CONFIG_HUGETLB_PAGE */
...
@@ -35,6 +35,8 @@
 #include <linux/pci.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
+#include <linux/hugetlb.h>
+
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -64,6 +66,7 @@
 #include <asm/mmu_context.h>
 #include <asm/code-patching.h>
 #include <asm/kvm_ppc.h>
+#include <asm/hugetlb.h>

 #include "setup.h"
@@ -217,6 +220,13 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Initialize the hash table or TLB handling */
 	early_init_mmu();

+	/*
+	 * Reserve any gigantic pages requested on the command line.
+	 * memblock needs to have been initialized by the time this is
+	 * called since this will reserve memory.
+	 */
+	reserve_hugetlb_gpages();
+
 	DBG(" <- early_setup()\n");
 }
...
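Note the ordering constraint spelled out in the comment: gigantic pages must be carved out of memblock before the buddy allocator exists, which is why the call sits in early_setup() right after early_init_mmu(). The reservation is driven entirely by the kernel command line; for example (sizes are platform-dependent, these values are illustrative only):

	hugepagesz=256m hugepages=2

With no such parameters on the command line, reserve_hugetlb_gpages() is effectively a no-op.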
@@ -37,31 +37,32 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 	return found;
 }

-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte)
 {
 	unsigned long mas1, mas2;
 	u64 mas7_3;
 	unsigned long psize, tsize, shift;
 	unsigned long flags;
+	struct mm_struct *mm;

 #ifdef CONFIG_PPC_FSL_BOOK3E
-	int index, lz, ncams;
-	struct vm_area_struct *vma;
+	int index, ncams;
 #endif

 	if (unlikely(is_kernel_addr(ea)))
 		return;

+	mm = vma->vm_mm;
+
 #ifdef CONFIG_PPC_MM_SLICES
-	psize = mmu_get_tsize(get_slice_psize(mm, ea));
-	tsize = mmu_get_psize(psize);
+	psize = get_slice_psize(mm, ea);
+	tsize = mmu_get_tsize(psize);
 	shift = mmu_psize_defs[psize].shift;
 #else
-	vma = find_vma(mm, ea);
-	psize = vma_mmu_pagesize(vma);	/* returns actual size in bytes */
-	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize));
-	shift = 31 - lz;
-	tsize = 21 - lz;
+	psize = vma_mmu_pagesize(vma);
+	shift = __ilog2(psize);
+	tsize = shift - 10;
 #endif

 	/*
...
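The dropped PPC_CNTLZL sequence and the new __ilog2() form compute the same thing: as the code implies, MAS1.TSIZE ends up as log2 of the page size in KB (tsize = shift - 10 means the page size is 2^tsize KB). A standalone worked check of the arithmetic, with __ilog2 swapped for a GCC builtin since this sketch runs in userspace (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long psize = 4UL << 20;	/* a 4M huge page */
	/* 63 - clzl(x) == __ilog2(x) for a 64-bit long */
	unsigned int shift = 63 - __builtin_clzl(psize);
	unsigned int tsize = shift - 10;

	/* prints shift=22 tsize=12; 2^12 KB = 4M */
	printf("shift=%u tsize=%u\n", shift, tsize);
	return 0;
}

This matches what the old "shift = 31 - lz; tsize = 21 - lz" pair produced for a 32-bit psize, minus the inline assembly.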
@@ -28,22 +28,22 @@ unsigned int HPAGE_SHIFT;

 /*
  * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready.  On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array.  FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
-#ifdef CONFIG_PPC64
-#define MAX_NUMBER_GPAGES	1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
 struct psize_gpages {
 	u64 gpage_list[MAX_NUMBER_GPAGES];
 	unsigned int nr_gpages;
 };
 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
+#else
+#define MAX_NUMBER_GPAGES	1024
+static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+static unsigned nr_gpages;
 #endif

 static inline int shift_to_mmu_psize(unsigned int shift)
@@ -114,12 +114,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	struct kmem_cache *cachep;
 	pte_t *new;

-#ifdef CONFIG_PPC64
-	cachep = PGT_CACHE(pdshift - pshift);
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	int i;
 	int num_hugepd = 1 << (pshift - pdshift);
 	cachep = hugepte_cache;
+#else
+	cachep = PGT_CACHE(pdshift - pshift);
 #endif

 	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
@@ -131,12 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		return -ENOMEM;

 	spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC64
-	if (!hugepd_none(*hpdp))
-		kmem_cache_free(cachep, new);
-	else
-		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	/*
 	 * We have multiple higher-level entries that point to the same
 	 * actual pte location.  Fill in each as we go and backtrack on error.
@@ -155,11 +150,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 			hpdp->pd = 0;
 			kmem_cache_free(cachep, new);
 		}
+#else
+	if (!hugepd_none(*hpdp))
+		kmem_cache_free(cachep, new);
+	else
+		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 #endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }

+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT PUD_SHIFT
+#define HUGEPD_PUD_SHIFT PMD_SHIFT
+#endif

 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
@@ -172,12 +184,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	addr &= ~(sz-1);

 	pg = pgd_offset(mm, addr);
-	if (pshift >= PUD_SHIFT) {
+
+	if (pshift >= HUGEPD_PGD_SHIFT) {
 		hpdp = (hugepd_t *)pg;
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
-		if (pshift >= PMD_SHIFT) {
+		if (pshift >= HUGEPD_PUD_SHIFT) {
 			hpdp = (hugepd_t *)pu;
 		} else {
 			pdshift = PMD_SHIFT;
@@ -197,7 +210,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	return hugepte_offset(hpdp, addr, pdshift);
 }

-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
  */
@@ -317,7 +330,7 @@ void __init reserve_hugetlb_gpages(void)
 	}
 }

-#else /* PPC64 */
+#else /* !PPC_FSL_BOOK3E */

 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
@@ -355,7 +368,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }

-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -415,11 +428,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
 	unsigned long pdmask = ~((1UL << pdshift) - 1);
 	unsigned int num_hugepd = 1;

-#ifdef CONFIG_PPC64
-	unsigned int shift = hugepd_shift(*hpdp);
-#else
-	/* Note: On 32-bit the hpdp may be the first of several */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* Note: On fsl the hpdp may be the first of several */
 	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
+#else
+	unsigned int shift = hugepd_shift(*hpdp);
 #endif

 	start &= pdmask;
@@ -437,10 +450,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
 	hpdp->pd = 0;
 	tlb->need_flush = 1;
-#ifdef CONFIG_PPC64
-	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#else
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	hugepd_free(tlb, hugepte);
+#else
+	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 #endif
 }
@@ -453,14 +467,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	unsigned long start;

 	start = addr;
-	pmd = pmd_offset(pud, addr);
 	do {
+		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd))
 			continue;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+		/*
+		 * Increment next by the size of the huge mapping since
+		 * there may be more than one entry at this level for a
+		 * single hugepage, but all of them point to
+		 * the same kmem cache that holds the hugepte.
+		 */
+		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
 		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 				  addr, next, floor, ceiling);
-	} while (pmd++, addr = next, addr != end);
+	} while (addr = next, addr != end);

 	start &= PUD_MASK;
 	if (start < floor)
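The loop restructure above is load-bearing: once the FSL branch can advance next by the whole huge-mapping size, a pmd++ would fall out of step with addr, so the pmd pointer is now recomputed from addr at the top of each iteration instead. The pud walk in the next two hunks gets the identical treatment.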
@@ -487,8 +510,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	unsigned long start;

 	start = addr;
-	pud = pud_offset(pgd, addr);
 	do {
+		pud = pud_offset(pgd, addr);
 		next = pud_addr_end(addr, end);
 		if (!is_hugepd(pud)) {
 			if (pud_none_or_clear_bad(pud))
@@ -496,10 +519,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 					       ceiling);
 		} else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+			/*
+			 * Increment next by the size of the huge mapping since
+			 * there may be more than one entry at this level for a
+			 * single hugepage, but all of them point to
+			 * the same kmem cache that holds the hugepte.
+			 */
+			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 					  addr, next, floor, ceiling);
 		}
-	} while (pud++, addr = next, addr != end);
+	} while (addr = next, addr != end);

 	start &= PGDIR_MASK;
 	if (start < floor)
@@ -554,12 +586,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 				continue;
 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 		} else {
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 			/*
 			 * Increment next by the size of the huge mapping since
-			 * on 32-bit there may be more than one entry at the pgd
-			 * level for a single hugepage, but all of them point to
-			 * the same kmem cache that holds the hugepte.
+			 * there may be more than one entry at the pgd level
+			 * for a single hugepage, but all of them point to the
+			 * same kmem cache that holds the hugepte.
 			 */
 			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
 #endif
@@ -697,19 +729,17 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
 	return 1;
 }

+#ifdef CONFIG_PPC_MM_SLICES
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-#ifdef CONFIG_PPC_MM_SLICES
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
-#else
-	return get_unmapped_area(file, addr, len, pgoff, flags);
-#endif
 }
+#endif

 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
@@ -783,7 +813,7 @@ static int __init hugepage_setup_sz(char *str)
 }
 __setup("hugepagesz=", hugepage_setup_sz);

-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct kmem_cache *hugepte_cache;
 static int __init hugetlbpage_init(void)
 {
...
@@ -553,7 +553,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 	&& defined(CONFIG_HUGETLB_PAGE)
 	if (is_vm_hugetlb_page(vma))
-		book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
+		book3e_hugetlb_preload(vma, address, *ptep);
 #endif
 }
...
@@ -94,11 +94,11 @@
 	srdi	r15,r16,60		/* get region */
 	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
-	bne-	dtlb_miss_fault_bolted
+	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

 	rlwinm	r10,r11,32-19,27,27
 	rlwimi	r10,r11,32-16,19,19
-	cmpwi	r15,0
+	cmpwi	r15,0			/* user vs kernel check */
 	ori	r10,r10,_PAGE_PRESENT
 	oris	r11,r10,_PAGE_ACCESSED@h
@@ -120,44 +120,38 @@ tlb_miss_common_bolted:
 	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
 	cmpldi	cr0,r14,0
 	clrrdi	r15,r15,3
-	beq	tlb_miss_fault_bolted
+	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */

 BEGIN_MMU_FTR_SECTION
 	/* Set the TLB reservation and search for existing entry. Then load
 	 * the entry.
 	 */
 	PPC_TLBSRX_DOT(0,r16)
-	ldx	r14,r14,r15
-	beq	normal_tlb_miss_done
+	ldx	r14,r14,r15		/* grab pgd entry */
+	beq	normal_tlb_miss_done	/* tlb exists already, bail */
 MMU_FTR_SECTION_ELSE
-	ldx	r14,r14,r15
+	ldx	r14,r14,r15		/* grab pgd entry */
 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

 #ifndef CONFIG_PPC_64K_PAGES
 	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
+	ldx	r14,r14,r15		/* grab pud entry */
 #endif /* CONFIG_PPC_64K_PAGES */

 	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab pmd entry */

 	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

 	/* Check if required permissions are met */
 	andc.	r15,r11,r14
...
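The cmpdi/bge rewrite in this handler folds two tests into one branch. A regular next-level pointer is a kernel virtual address with the top bit set, i.e. negative as a signed value, while an empty entry is zero and a hugepd entry has the top bit cleared (see the "((unsigned long)new & ~PD_HUGE) | pshift" assignment in the hugetlbpage.c hunk above). A signed compare against zero therefore bails for both "no entry" and "hugepage here" in a single bge, where the old unsigned cmpldi/beq could only catch a NULL entry.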
@@ -52,7 +52,7 @@
  * indirect page table entries.
  */
 #ifdef CONFIG_PPC_BOOK3E_MMU
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
...
@@ -174,7 +174,6 @@ config BOOKE
 config FSL_BOOKE
 	bool
 	depends on (E200 || E500) && PPC32
-	select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT
 	default y

 # this is for common code between PPC32 & PPC64 FSL BOOKE
@@ -182,6 +181,7 @@ config PPC_FSL_BOOK3E
 	bool
 	select FSL_EMB_PERFMON
 	select PPC_SMP_MUXED_IPI
+	select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
 	default y if FSL_BOOKE

 config PTE_64BIT
@@ -309,7 +309,7 @@ config PPC_BOOK3E_MMU
 config PPC_MM_SLICES
 	bool
-	default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
+	default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
 	default n

 config VIRT_CPU_ACCOUNTING
...