Commit 17ff6820 authored by David Gibson, committed by Linus Torvalds

[PATCH] ppc64: rework hugepage code

Rework the ppc64 hugepage code.  Instead of using specially marked pmd
entries in the normal pagetables to represent hugepages, use normal pte_t
entries, in a special set of pagetables used for hugepages only.
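
To make the new layout concrete, here is a userspace toy model (hypothetical names and scaffolding; only the 16MB hugepage size, the huge software bit and PTE_SHIFT come from the patch itself): hugepage translations live in a table of their own, indexed by hugepage number, and each entry is an ordinary pte-style word rather than a specially marked pmd in the normal pagetables.

    /* Toy model of the new layout (userspace C, hypothetical names). */
    #include <stdint.h>
    #include <stdio.h>

    #define HPAGE_SHIFT     24              /* 16MB hugepages */
    #define PTE_SHIFT       17              /* shift to put page number into pte, as in the patch */
    #define PAGE_HUGE       0x10000UL       /* software huge bit (_PAGE_HUGE in the patch) */

    typedef uint64_t pte_t;                 /* ordinary pte words, no special hugepte_t */

    #define N_HUGEPAGES     16
    static pte_t huge_pagetable[N_HUGEPAGES];   /* separate table used for hugepages only */

    /* Hypothetical lookup: index by hugepage number instead of walking to
     * a specially marked pmd in the regular pagetables. */
    static pte_t *toy_huge_pte_offset(unsigned long addr)
    {
        return &huge_pagetable[addr >> HPAGE_SHIFT];
    }

    int main(void)
    {
        unsigned long addr = 2UL << HPAGE_SHIFT;        /* third 16MB hugepage */
        pte_t *ptep = toy_huge_pte_offset(addr);

        *ptep = (0xabcUL << PTE_SHIFT) | PAGE_HUGE;     /* page number plus flags */
        printf("%#llx is a %s pte\n", (unsigned long long)*ptep,
               (*ptep & PAGE_HUGE) ? "huge" : "normal");
        return 0;
    }
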

Using pte_t instead of a special hugepte_t makes the code more similar to
that for other architectures, allowing more possibilities for
consolidating the hugepage code.

Using independent pagetables for the hugepages is also a prerequisite for
moving the hugepages into their own region well outside the normal user
address space.  The restrictions imposed by the powerpc mmu's segment
design mean we probably want to do that in the fairly near future.
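
A sketch of why the segment design matters (userspace model; the real checks are the in_hugepage_area()/touches_hugepage_*_range() macros in the page.h hunk below, and the 256MB segment size follows from the ea >> 28 / vsid << 28 arithmetic in the hash code): with the hash MMU the page size is effectively a property of a 256MB segment rather than of an individual mapping, so hugepage mappings must stay inside segments reserved for them; htlb_segs is the bitmask tracking which low segments have been handed over.

    /* Sketch of the low-range segment check (hypothetical names). */
    #include <stdint.h>
    #include <stdio.h>

    #define SID_SHIFT   28                  /* 256MB segments, as in va = (vsid << 28) | ... */

    struct toy_mm_context {
        uint16_t htlb_segs;                 /* bitmask of low 256MB hugepage segments */
    };

    static int toy_in_low_hugepage_seg(const struct toy_mm_context *ctx,
                                       unsigned long ea)
    {
        unsigned long seg = ea >> SID_SHIFT;

        /* only the first 16 segments (low 4GB) are covered by the bitmask */
        return seg < 16 && (ctx->htlb_segs & (1U << seg));
    }

    int main(void)
    {
        struct toy_mm_context ctx = { .htlb_segs = 1U << 3 };   /* segment 3 is huge */

        printf("%d %d\n",
               toy_in_low_hugepage_seg(&ctx, 3UL << SID_SHIFT),
               toy_in_low_hugepage_seg(&ctx, 5UL << SID_SHIFT));
        return 0;
    }
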
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bdef750d
@@ -341,9 +341,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
                      int local)
 {
         unsigned long vsid, vpn, va, hash, secondary, slot;
-
-        /* XXX fix for large ptes */
-        unsigned long large = 0;
+        unsigned long huge = pte_huge(pte);
 
         if ((ea >= USER_START) && (ea <= USER_END))
                 vsid = get_vsid(context, ea);
@@ -351,18 +349,18 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
                 vsid = get_kernel_vsid(ea);
 
         va = (vsid << 28) | (ea & 0x0fffffff);
-        if (large)
+        if (huge)
                 vpn = va >> HPAGE_SHIFT;
         else
                 vpn = va >> PAGE_SHIFT;
-        hash = hpt_hash(vpn, large);
+        hash = hpt_hash(vpn, huge);
         secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
         if (secondary)
                 hash = ~hash;
         slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
         slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
 
-        ppc_md.hpte_invalidate(slot, va, large, local);
+        ppc_md.hpte_invalidate(slot, va, huge, local);
 }
 
 void flush_hash_range(unsigned long context, unsigned long number, int local)
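
The flush path only needs to learn the page size from the pte itself: pte_huge() selects HPAGE_SHIFT instead of PAGE_SHIFT when turning the virtual address into a virtual page number. A standalone illustration of that arithmetic (assuming 4K base pages; the 16MB hugepage size is noted in the pgtable.h hunk further down):

    /* How the huge flag changes the vpn computation (values assumed). */
    #include <stdio.h>

    #define PAGE_SHIFT      12              /* 4K base pages (assumed) */
    #define HPAGE_SHIFT     24              /* 16MB hugepages */

    int main(void)
    {
        unsigned long vsid = 0x123, ea = 0x30123456;
        unsigned long va = (vsid << 28) | (ea & 0x0fffffff);

        printf("normal vpn = %#lx\n", va >> PAGE_SHIFT);    /* 4K page number */
        printf("huge   vpn = %#lx\n", va >> HPAGE_SHIFT);   /* 16MB page number */
        return 0;
    }
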
This diff is collapsed.
@@ -478,6 +478,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         int index;
         int err;
 
+#ifdef CONFIG_HUGETLB_PAGE
+        /* We leave htlb_segs as it was, but for a fork, we need to
+         * clear the huge_pgdir. */
+        mm->context.huge_pgdir = NULL;
+#endif
+
 again:
         if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
                 return -ENOMEM;
@@ -508,6 +514,8 @@ void destroy_context(struct mm_struct *mm)
         spin_unlock(&mmu_context_lock);
 
         mm->context.id = NO_CONTEXT;
+
+        hugetlb_mm_free_pgd(mm);
 }
 
 static int __init mmu_context_init(void)
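
The two context hunks above pin down the lifetime of the new hugepage pagetable: a freshly created context, including one produced by fork, starts with no huge_pgdir, and whatever was allocated later (presumably in the collapsed hugetlbpage.c changes) is released together with the context. A toy model of that ownership rule, with hypothetical names:

    /* Toy model of the huge_pgdir lifetime (hypothetical names). */
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_context {
        void *huge_pgdir;               /* stands in for the per-mm hugepage pgdir */
    };

    static void toy_init_new_context(struct toy_context *ctx)
    {
        /* as in init_new_context(): a fork must not inherit the parent's pointer */
        ctx->huge_pgdir = NULL;
    }

    static void toy_destroy_context(struct toy_context *ctx)
    {
        /* as in destroy_context() -> hugetlb_mm_free_pgd() */
        free(ctx->huge_pgdir);
        ctx->huge_pgdir = NULL;
    }

    int main(void)
    {
        struct toy_context ctx;

        toy_init_new_context(&ctx);
        ctx.huge_pgdir = malloc(4096);  /* stands in for later, lazy allocation */
        toy_destroy_context(&ctx);
        printf("huge_pgdir is %p after destroy\n", ctx.huge_pgdir);
        return 0;
    }
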
@@ -24,6 +24,7 @@ typedef unsigned long mm_context_id_t;
 typedef struct {
         mm_context_id_t id;
 #ifdef CONFIG_HUGETLB_PAGE
+        pgd_t *huge_pgdir;
         u16 htlb_segs;  /* bitmask */
 #endif
 } mm_context_t;
@@ -64,7 +64,6 @@
 #define is_hugepage_only_range(addr, len) \
         (touches_hugepage_high_range((addr), (len)) || \
          touches_hugepage_low_range((addr), (len)))
-#define hugetlb_free_pgtables   free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #define in_hugepage_area(context, addr) \
@@ -98,6 +98,7 @@
 #define _PAGE_BUSY      0x0800 /* software: PTE & hash are busy */
 #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
 #define _PAGE_GROUP_IX  0x7000 /* software: HPTE index within group */
+#define _PAGE_HUGE      0x10000 /* 16MB page */
 /* Bits 0x7000 identify the index within an HPT Group */
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
 /* PAGE_MASK gives the right answer below, but only by accident */
@@ -157,19 +158,19 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #endif /* __ASSEMBLY__ */
 
 /* shift to put page number into pte */
-#define PTE_SHIFT (16)
+#define PTE_SHIFT (17)
 
 /* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
  * to give the PTE page number.  The bottom two bits are for flags. */
 #define PMD_TO_PTEPAGE_SHIFT (2)
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define _PMD_HUGEPAGE   0x00000001U
 #define HUGEPTE_BATCH_SIZE (1<<(HPAGE_SHIFT-PMD_SHIFT))
 
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
                    unsigned long ea, unsigned long vsid, int local);
+void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -177,7 +178,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)      -1
-#define _PMD_HUGEPAGE   0
+#define hugetlb_mm_free_pgd(mm)                 do {} while (0)
 
 #endif
 
@@ -213,10 +214,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 #define pmd_set(pmdp, ptep)     \
         (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
 #define pmd_none(pmd)           (!pmd_val(pmd))
-#define pmd_hugepage(pmd)       (!!(pmd_val(pmd) & _PMD_HUGEPAGE))
-#define pmd_bad(pmd)            (((pmd_val(pmd)) == 0) || pmd_hugepage(pmd))
-#define pmd_present(pmd)        ((!pmd_hugepage(pmd)) \
-                                 && (pmd_val(pmd) & ~_PMD_HUGEPAGE) != 0)
+#define pmd_bad(pmd)            (pmd_val(pmd) == 0)
+#define pmd_present(pmd)        (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
 #define pmd_page_kernel(pmd)    \
         (__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
@@ -269,6 +268,7 @@ static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE;}
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -294,6 +294,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
         pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) {
         pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+        pte_val(pte) |= _PAGE_HUGE; return pte; }
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(pte_t *p, unsigned long clr)
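
The new accessors are plain bit operations on the software bit added above. The same logic lifted into a standalone program (only the bit value and the accessor bodies come from the hunks; the pte_t scaffolding here is simplified, and the macro is renamed to avoid a reserved identifier in userspace):

    /* The pte_huge()/pte_mkhuge() bit logic as a standalone program. */
    #include <stdio.h>

    #define PAGE_HUGE  0x10000UL            /* _PAGE_HUGE in the patch: 16MB page */

    typedef struct { unsigned long pte; } pte_t;
    #define pte_val(x) ((x).pte)

    static inline int pte_huge(pte_t pte) { return (pte_val(pte) & PAGE_HUGE) != 0; }
    static inline pte_t pte_mkhuge(pte_t pte) { pte_val(pte) |= PAGE_HUGE; return pte; }

    int main(void)
    {
        pte_t pte = { 0 };

        printf("before: pte_huge() = %d\n", pte_huge(pte));
        pte = pte_mkhuge(pte);
        printf("after:  pte_huge() = %d\n", pte_huge(pte));
        return 0;
    }
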
@@ -464,6 +466,10 @@ extern pgd_t ioremap_dir[1024];
 
 extern void paging_init(void);
 
+struct mmu_gather;
+void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
+                           unsigned long start, unsigned long end);
+
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.