Commit 49c18faa authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] From: David Gibson <david@gibson.dropbear.id.au>

hugepage_vma() is both misleadingly named and unnecessary.  On most archs it
always returns NULL, and on IA64 the vma it returns is never used.  The
function's real purpose is to determine whether the address it is passed is a
special hugepage address which must be looked up in hugepage pagetables,
rather than being looked up in the normal pagetables (which might have
specially marked hugepage PMDs or PTEs).

This patch kills off hugepage_vma() and folds the logic it really needs into
follow_huge_addr().  That now returns a (page *) if called on a special
hugepage address, and an error encoded with ERR_PTR otherwise.  This also
requires tweaking the IA64 code to check that the hugepage PTE is present in
follow_huge_addr() - previously this was guaranteed, since it was only called
if the address was in an existing hugepage VMA, and hugepages are always
prefaulted.
parent 3d9d1320
@@ -140,32 +140,31 @@ follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 #if 0	/* This is just for testing */
 struct page *
-follow_huge_addr(struct mm_struct *mm,
-		 struct vm_area_struct *vma, unsigned long address, int write)
+follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
 	unsigned long start = address;
 	int length = 1;
 	int nr;
 	struct page *page;
+	struct vm_area_struct *vma;
 
-	nr = follow_hugetlb_page(mm, vma, &page, NULL, &start, &length, 0);
-	if (nr == 1)
-		return page;
-	return NULL;
-}
+	if (! mm->used_hugetlb)
+		return ERR_PTR(-EINVAL);
 
-/*
- * If virtual address `addr' lies within a huge page, return its controlling
- * VMA, else NULL.
- */
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
-	if (mm->used_hugetlb) {
-		struct vm_area_struct *vma = find_vma(mm, addr);
-		if (vma && is_vm_hugetlb_page(vma))
-			return vma;
-	}
-	return NULL;
+	vma = find_vma(mm, addr);
+	if (!vma || !is_vm_hugetlb_page(vma))
+		return ERR_PTR(-EINVAL);
+
+	pte = huge_pte_offset(mm, address);
+
+	/* hugetlb should be locked, and hence, prefaulted */
+	WARN_ON(!pte || pte_none(*pte));
+
+	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
+
+	WARN_ON(!PageCompound(page));
+
+	return page;
 }
 
 int pmd_huge(pmd_t pmd)
@@ -183,15 +182,9 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 #else
 struct page *
-follow_huge_addr(struct mm_struct *mm,
-		 struct vm_area_struct *vma, unsigned long address, int write)
+follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-	return NULL;
-}
-
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
-	return NULL;
+	return ERR_PTR(-EINVAL);
 }
 
 int pmd_huge(pmd_t pmd)
......
@@ -49,8 +49,12 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, taddr);
-	pmd = pmd_offset(pgd, taddr);
-	pte = pte_offset_map(pmd, taddr);
+	if (pgd_present(*pgd)) {
+		pmd = pmd_offset(pgd, taddr);
+		if (pmd_present(*pmd))
+			pte = pte_offset_map(pmd, taddr);
+	}
 
 	return pte;
 }
@@ -150,24 +154,19 @@ follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return i;
 }
 
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
-	if (mm->used_hugetlb) {
-		if (REGION_NUMBER(addr) == REGION_HPAGE) {
-			struct vm_area_struct *vma = find_vma(mm, addr);
-			if (vma && is_vm_hugetlb_page(vma))
-				return vma;
-		}
-	}
-	return NULL;
-}
-
-struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int write)
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
 {
 	struct page *page;
 	pte_t *ptep;
 
+	if (! mm->used_hugetlb)
+		return ERR_PTR(-EINVAL);
+	if (REGION_NUMBER(addr) != REGION_HPAGE)
+		return ERR_PTR(-EINVAL);
+
 	ptep = huge_pte_offset(mm, addr);
+	if (!ptep || pte_none(*ptep))
+		return NULL;
 	page = pte_page(*ptep);
 	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
 	return page;
......
@@ -335,15 +335,9 @@ follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 struct page *
-follow_huge_addr(struct mm_struct *mm,
-		 struct vm_area_struct *vma, unsigned long address, int write)
+follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-	return NULL;
-}
-
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
-	return NULL;
+	return ERR_PTR(-EINVAL);
 }
 
 int pmd_huge(pmd_t pmd)
......
@@ -166,15 +166,9 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 struct page *follow_huge_addr(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long address, int write)
 {
-	return NULL;
-}
-
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
-	return NULL;
+	return ERR_PTR(-EINVAL);
 }
 
 int pmd_huge(pmd_t pmd)
......
@@ -164,15 +164,9 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 struct page *follow_huge_addr(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long address, int write)
 {
-	return NULL;
-}
-
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
-	return NULL;
+	return ERR_PTR(-EINVAL);
 }
 
 int pmd_huge(pmd_t pmd)
......
@@ -20,10 +20,8 @@ void huge_page_release(struct page *);
 int hugetlb_report_meminfo(char *);
 int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
-struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
-			      unsigned long address, int write);
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
-				    unsigned long address);
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+			      int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write);
 int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
@@ -65,7 +63,7 @@ static inline unsigned long hugetlb_total_pages(void)
 }
 
 #define follow_hugetlb_page(m,v,p,vs,a,b,i)	({ BUG(); 0; })
-#define follow_huge_addr(mm, vma, addr, write)	0
+#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
 #define zap_hugepage_range(vma, start, len)	BUG()
@@ -73,7 +71,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define huge_page_release(page)			BUG()
 #define is_hugepage_mem_enough(size)		0
 #define hugetlb_report_meminfo(buf)		0
-#define hugepage_vma(mm, addr)			0
 #define mark_mm_hugetlb(mm, vma)		do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)	0
 #define is_aligned_hugepage_range(addr, len)	0
......
@@ -657,11 +657,11 @@ follow_page(struct mm_struct *mm, unsigned long address, int write)
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 	unsigned long pfn;
-	struct vm_area_struct *vma;
+	struct page *page;
 
-	vma = hugepage_vma(mm, address);
-	if (vma)
-		return follow_huge_addr(mm, vma, address, write);
+	page = follow_huge_addr(mm, address, write);
+	if (! IS_ERR(page))
+		return page;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment