Commit 3f28b857 authored by Yanmin Zhang, committed by Linus Torvalds

[PATCH] hugetlb_get_unmapped_area fix

hugetlb_get_unmapped_area() can fail to find an unmapped area even when the
address space still contains a large enough hole.  That's because it only
searches forward from mm->free_area_cache: once the search reaches TASK_SIZE
it returns -ENOMEM immediately instead of wrapping back to TASK_UNMAPPED_BASE
and retrying.

1) Add an arch-specific hugetlb_get_unmapped_area() on i386.
2) Fix the generic hugetlb_get_unmapped_area() the same way (the wrap-around
pattern is sketched below).
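
For reference, here is a minimal userspace sketch of the wrap-around search
this patch introduces; it is not kernel code.  The mapped[] table, the toy
address constants, and find_range() are invented stand-ins for the mm's vma
list, the per-arch TASK_UNMAPPED_BASE/TASK_SIZE values, and find_vma():

	#include <stdio.h>
	#include <stddef.h>

	/* Toy address-space constants (invented; the real ones are per-arch). */
	#define TASK_UNMAPPED_BASE	0x40000000UL
	#define TASK_SIZE		0xc0000000UL
	#define HPAGE_SIZE		0x00400000UL	/* 4 MB huge pages */
	#define HPAGE_ALIGN(x)	(((x) + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1))

	/* Stand-in for the mm's sorted vma list. */
	struct range { unsigned long start, end; };

	static const struct range mapped[] = {
		{ 0x40000000UL, 0x50000000UL },
		{ 0x60000000UL, 0xb8000000UL },
	};
	static const size_t nmapped = sizeof(mapped) / sizeof(mapped[0]);

	static unsigned long free_area_cache = 0xb8000000UL;	/* search hint */

	/* Stand-in for find_vma(): first mapped range ending above addr. */
	static size_t find_range(unsigned long addr)
	{
		size_t i;

		for (i = 0; i < nmapped; i++)
			if (addr < mapped[i].end)
				break;
		return i;
	}

	static unsigned long get_unmapped_area(unsigned long len)
	{
		unsigned long start_addr = free_area_cache;
		unsigned long addr;
		size_t i;

	full_search:
		addr = HPAGE_ALIGN(start_addr);
		for (i = find_range(addr); ; i++) {
			/* At this point: i == nmapped || addr < mapped[i].end. */
			if (TASK_SIZE - len < addr) {
				/* Out of room: wrap to the base once, then give up. */
				if (start_addr != TASK_UNMAPPED_BASE) {
					start_addr = TASK_UNMAPPED_BASE;
					goto full_search;
				}
				return 0;	/* sketch-only failure value */
			}
			if (i == nmapped || addr + len <= mapped[i].start) {
				free_area_cache = addr + len;	/* hint for next call */
				return addr;
			}
			addr = HPAGE_ALIGN(mapped[i].end);
		}
	}

	int main(void)
	{
		/* 256 MB: nothing fits above the 0xb8000000 hint, but the
		 * wrap-around retry finds the 0x50000000-0x60000000 hole. */
		printf("got %#lx\n", get_unmapped_area(0x10000000UL));
		return 0;
	}

Compiled standalone this prints "got 0x50000000": the forward pass from the
hint runs into TASK_SIZE, and only the goto full_search retry finds the hole,
which is exactly the case the unpatched code turned into -ENOMEM.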
Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e1c5245a
@@ -282,3 +282,143 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 	spin_unlock(&mm->page_table_lock);
 	return ret;
 }
+
+/* x86_64 also uses this file */
+
+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+		unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long start_addr;
+
+	start_addr = mm->free_area_cache;
+
+full_search:
+	addr = ALIGN(start_addr, HPAGE_SIZE);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (TASK_SIZE - len < addr) {
+			/*
+			 * Start a new search - just in case we missed
+			 * some holes.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = TASK_UNMAPPED_BASE;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+
+		if (!vma || addr + len <= vma->vm_start) {
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+	}
+}
+
+static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+		unsigned long addr0, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma, *prev_vma;
+	unsigned long base = mm->mmap_base, addr = addr0;
+	int first_time = 1;
+
+	/* don't allow allocations above current base */
+	if (mm->free_area_cache > base)
+		mm->free_area_cache = base;
+
+try_again:
+	/* make sure it can fit in the remaining address space */
+	if (mm->free_area_cache < len)
+		goto fail;
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = (mm->free_area_cache - len) & HPAGE_MASK;
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * i.e. return with success:
+		 */
+		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+			return addr;
+
+		/*
+		 * new region fits between prev_vma->vm_end and
+		 * vma->vm_start, use it:
+		 */
+		if (addr + len <= vma->vm_start &&
+				(!prev_vma || (addr >= prev_vma->vm_end)))
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		else
+			/* pull free_area_cache down to the first hole */
+			if (mm->free_area_cache == vma->vm_end)
+				mm->free_area_cache = vma->vm_start;
+
+		/* try just below the current vma->vm_start */
+		addr = (vma->vm_start - len) & HPAGE_MASK;
+	} while (len <= vma->vm_start);
+
+fail:
+	/*
+	 * if hint left us with no space for the requested
+	 * mapping then try again:
+	 */
+	if (first_time) {
+		mm->free_area_cache = base;
+		first_time = 0;
+		goto try_again;
+	}
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
+			len, pgoff, flags);
+
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = base;
+
+	return addr;
+}
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		addr = ALIGN(addr, HPAGE_SIZE);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	if (mm->get_unmapped_area == arch_get_unmapped_area)
+		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+				pgoff, flags);
+	else
+		return hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+}
+
+#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
@@ -105,6 +105,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	unsigned long start_addr;
 
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
@@ -119,12 +120,25 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return addr;
 	}
 
-	addr = ALIGN(mm->free_area_cache, HPAGE_SIZE);
+	start_addr = mm->free_area_cache;
+
+full_search:
+	addr = ALIGN(start_addr, HPAGE_SIZE);
+
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr)
+		if (TASK_SIZE - len < addr) {
+			/*
+			 * Start a new search - just in case we missed
+			 * some holes.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = TASK_UNMAPPED_BASE;
+				goto full_search;
+			}
 			return -ENOMEM;
+		}
 
 		if (!vma || addr + len <= vma->vm_start)
 			return addr;
 		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
@@ -63,6 +63,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK	(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif