Commit 712fa5f2 authored by Alexander Gordeev's avatar Alexander Gordeev Committed by Vasily Gorbik

s390/mm: cleanup arch_get_unmapped_area() and friends

Factor out the check_asce_limit() function and fix a few style
defects in arch_get_unmapped_area() family of functions.
Signed-off-by: default avatarAlexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: default avatarHeiko Carstens <heiko.carstens@de.ibm.com>
[heiko.carstens@de.ibm.com: small coding style changes]
Signed-off-by: default avatarHeiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: default avatarVasily Gorbik <gor@linux.ibm.com>
parent 194f7570
...@@ -48,6 +48,20 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm) ...@@ -48,6 +48,20 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit); int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *); void crst_table_downgrade(struct mm_struct *);
/*
 * Verify that a prospective mapping [addr, addr + len) fits below the
 * current ASCE (address space control element) limit, upgrading the
 * region table hierarchy when the mapping still fits within TASK_SIZE.
 *
 * Returns addr on success (including when no upgrade was required), or
 * the negative error code from crst_table_upgrade() cast to unsigned
 * long on failure — the standard kernel pattern of encoding an errno
 * in an address-sized return value.
 */
static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
					     unsigned long len)
{
	unsigned long end = addr + len;

	if (end > mm->context.asce_limit && end <= TASK_SIZE) {
		int rc = crst_table_upgrade(mm, end);

		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
unsigned long *table = crst_table_alloc(mm); unsigned long *table = crst_table_alloc(mm);
......
...@@ -326,7 +326,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -326,7 +326,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *h = hstate_file(file); struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int rc;
if (len & ~huge_page_mask(h)) if (len & ~huge_page_mask(h))
return -EINVAL; return -EINVAL;
...@@ -353,15 +352,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -353,15 +352,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
else else
addr = hugetlb_get_unmapped_area_topdown(file, addr, len, addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags); pgoff, flags);
if (addr & ~PAGE_MASK) if (offset_in_page(addr))
return addr; return addr;
check_asce_limit: check_asce_limit:
if (addr + len > current->mm->context.asce_limit && return check_asce_limit(mm, addr, len);
addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
}
return addr;
} }
...@@ -72,14 +72,13 @@ static inline unsigned long mmap_base(unsigned long rnd, ...@@ -72,14 +72,13 @@ static inline unsigned long mmap_base(unsigned long rnd,
return PAGE_ALIGN(STACK_TOP - gap - rnd); return PAGE_ALIGN(STACK_TOP - gap - rnd);
} }
unsigned long unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long len, unsigned long pgoff, unsigned long flags) unsigned long flags)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct vm_unmapped_area_info info; struct vm_unmapped_area_info info;
int rc;
if (len > TASK_SIZE - mmap_min_addr) if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM; return -ENOMEM;
...@@ -105,30 +104,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, ...@@ -105,30 +104,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.align_mask = 0; info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT; info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info); addr = vm_unmapped_area(&info);
if (addr & ~PAGE_MASK) if (offset_in_page(addr))
return addr; return addr;
check_asce_limit: check_asce_limit:
if (addr + len > current->mm->context.asce_limit && return check_asce_limit(mm, addr, len);
addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
}
return addr;
} }
unsigned long unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, unsigned long len, unsigned long pgoff,
const unsigned long len, const unsigned long pgoff, unsigned long flags)
const unsigned long flags)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info; struct vm_unmapped_area_info info;
int rc;
/* requested length too big for entire address space */ /* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr) if (len > TASK_SIZE - mmap_min_addr)
...@@ -163,25 +152,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ...@@ -163,25 +152,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
* can happen with large stack limits and large mmap() * can happen with large stack limits and large mmap()
* allocations. * allocations.
*/ */
if (addr & ~PAGE_MASK) { if (offset_in_page(addr)) {
VM_BUG_ON(addr != -ENOMEM); VM_BUG_ON(addr != -ENOMEM);
info.flags = 0; info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE; info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE; info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info); addr = vm_unmapped_area(&info);
if (addr & ~PAGE_MASK) if (offset_in_page(addr))
return addr; return addr;
} }
check_asce_limit: check_asce_limit:
if (addr + len > current->mm->context.asce_limit && return check_asce_limit(mm, addr, len);
addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
}
return addr;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment