Commit d38c229c authored by Andrew Morton, committed by Linus Torvalds

[PATCH] hugetlbpages: factor out some code for hugetlbfs

In order for hugetlbfs to operate, prefaulting the vma at mmap()-time
while simultaneously instantiating and performing lookups on its
pagecache entries is needed as an isolated operation.  This is
currently implemented as part of a larger function in hugetlbpage.c
that ties it to inode and key lookup and allocation.

The following patch simply moves the code already present into its own
function, hugetlb_prefault(), calls it from the original site, and
makes it available for hugetlbfs to use.
parent e66c772c
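
For context, the point of exporting this helper is that a future hugetlbfs
->mmap handler can prefault an entire mapping in one call. A minimal sketch of
such a caller follows; hugetlbfs itself is not part of this patch, so the name
hugetlbfs_file_mmap and the surrounding details are illustrative assumptions:

	/*
	 * Illustrative sketch only: hugetlbfs is not part of this patch,
	 * so this handler and its name are hypothetical.  It shows how a
	 * filesystem could use the newly exported hugetlb_prefault().
	 */
	static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct inode *inode = file->f_dentry->d_inode;

		/* Huge page mappings must be hugepage-aligned at both ends. */
		if ((vma->vm_start & ~HPAGE_MASK) || (vma->vm_end & ~HPAGE_MASK))
			return -EINVAL;

		vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
		vma->vm_ops = &hugetlb_vm_ops;

		/* Instantiate and map every huge page in the range up front. */
		return hugetlb_prefault(inode->i_mapping, vma);
	}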
@@ -404,25 +404,10 @@ alloc_shared_hugetlb_pages(int key, unsigned long addr, unsigned long len,
 		goto freeinode;
 	}
 	spin_lock(&mm->page_table_lock);
-	do {
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		if ((pte) && (pte_none(*pte))) {
-			idx = (addr - vma->vm_start) >> HPAGE_SHIFT;
-			page = find_get_page(mapping, idx);
-			if (page == NULL) {
-				page = alloc_hugetlb_page();
-				if (page == NULL)
-					goto out;
-				add_to_page_cache(page, mapping, idx);
-			}
-			set_huge_pte(mm, vma, page, pte,
-					(vma->vm_flags & VM_WRITE));
-		} else
-			goto out;
-		addr += HPAGE_SIZE;
-	} while (addr < vma->vm_end);
-	retval = 0;
+	retval = hugetlb_prefault(mapping, vma);
+	if (retval)
+		goto out;
 	vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
 	vma->vm_ops = &hugetlb_vm_ops;
 	spin_unlock(&mm->page_table_lock);
@@ -457,6 +442,46 @@ out_err: spin_unlock(&htlbpage_lock);
 	return retval;
 }
 
+int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret = 0;
+
+	BUG_ON(vma->vm_start & ~HPAGE_MASK);
+	BUG_ON(vma->vm_end & ~HPAGE_MASK);
+
+	spin_lock(&mm->page_table_lock);
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+		unsigned long idx;
+		pte_t *pte = huge_pte_alloc(mm, addr);
+		struct page *page;
+
+		if (!pte) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		if (!pte_none(*pte))
+			continue;
+
+		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+		page = find_get_page(mapping, idx);
+		if (!page) {
+			page = alloc_hugetlb_page();
+			if (!page) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			add_to_page_cache(page, mapping, idx);
+		}
+		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+	}
+out:
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 static int
 alloc_private_hugetlb_pages(int key, unsigned long addr, unsigned long len,
 			    int prot, int flag)
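
One detail worth calling out in the new function is the pagecache index
computation: these mappings are indexed in the pagecache in HPAGE_SIZE units,
while vma->vm_pgoff is kept in PAGE_SIZE units, so the file offset has to be
scaled down by (HPAGE_SHIFT - PAGE_SHIFT). A standalone check of the
arithmetic, assuming i386's 4MB huge pages (HPAGE_SHIFT = 22, PAGE_SHIFT = 12;
the constants and addresses are illustrative, not taken from this patch):

	#include <assert.h>

	#define PAGE_SHIFT	12
	#define HPAGE_SHIFT	22

	int main(void)
	{
		unsigned long vm_start = 0x40000000;	/* hypothetical mapping base */
		unsigned long vm_pgoff = 2048;		/* file offset 8MB, in 4KB pages */
		unsigned long addr = vm_start + (1UL << HPAGE_SHIFT);	/* 2nd huge page */

		unsigned long idx = ((addr - vm_start) >> HPAGE_SHIFT)
			+ (vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

		/* 8MB offset = huge page 2; one huge page into the vma = index 3 */
		assert(idx == 3);
		return 0;
	}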
@@ -381,6 +381,7 @@ int __set_page_dirty_nobuffers(struct page *page);
 extern int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 extern int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
 extern int free_hugepages(struct vm_area_struct *);
+extern int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 #else
 #define is_vm_hugetlb_page(vma) (0)
 #define follow_hugetlb_page(mm, vma, pages, vmas, start, len, i) (0)
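
Only the CONFIG_HUGETLB_PAGE side of the #ifdef gains a declaration here; the
#else branch keeps stubbing the hugetlb interfaces out for kernels built
without huge page support. If a stub for the new helper were wanted in the
same style as the existing defines, it could look like the line below (an
assumption for illustration; this patch does not add it):

	#define hugetlb_prefault(mapping, vma)	(0)	/* hypothetical no-op stub */

Since every caller is itself compiled out under !CONFIG_HUGETLB_PAGE, the
stub's return value is effectively unreachable.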