Commit fbfef902 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Switch some TASK_SIZE checks to use mm_context addr_limit

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 82228e36
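
The diff below replaces the compile-time TASK_SIZE ceiling with a per-mm limit read from mm->context.addr_limit in the radix get_unmapped_area paths, the slice allocator, and sys_subpage_prot. For orientation, here is a minimal standalone sketch of the before/after pattern. The struct layouts and the example constant are simplified stand-ins (the real mm_context_t is architecture-specific), not the kernel's definitions:

	/*
	 * Illustrative sketch only: shows the shape of the change with
	 * simplified stand-in types. The field name mirrors the patch;
	 * the surrounding structs and the example value are assumptions.
	 */
	#include <stdio.h>

	#define TASK_SIZE	0x0000400000000000UL	/* example compile-time ceiling */

	struct mm_context {
		unsigned long addr_limit;	/* per-process ceiling */
	};

	struct mm_struct {
		struct mm_context context;
	};

	/* Before: every process shares one compile-time limit. */
	static int len_ok_old(unsigned long len)
	{
		return len <= TASK_SIZE;
	}

	/* After: each mm carries its own limit. */
	static int len_ok_new(const struct mm_struct *mm, unsigned long len)
	{
		return len <= mm->context.addr_limit;
	}

	int main(void)
	{
		struct mm_struct mm = { .context = { .addr_limit = TASK_SIZE } };

		printf("old: %d  new: %d\n",
		       len_ok_old(1UL << 30), len_ok_new(&mm, 1UL << 30));
		return 0;
	}

With addr_limit initialised per process (typically at or below TASK_SIZE), every check in the hunks below keeps its old meaning while allowing the limit to differ between processes.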
arch/powerpc/mm/hugetlbpage-radix.c
@@ -52,7 +52,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (len > TASK_SIZE)
+	if (len > mm->context.addr_limit)
 		return -ENOMEM;

 	if (flags & MAP_FIXED) {
@@ -64,7 +64,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
+		if (mm->context.addr_limit - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
arch/powerpc/mm/mmap.c
@@ -97,7 +97,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;

-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mm->context.addr_limit - mmap_min_addr)
 		return -ENOMEM;

 	if (flags & MAP_FIXED)
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
@@ -114,7 +114,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = mm->mmap_base;
-	info.high_limit = TASK_SIZE;
+	info.high_limit = mm->context.addr_limit;
 	info.align_mask = 0;
 	return vm_unmapped_area(&info);
 }
@@ -132,7 +132,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 	struct vm_unmapped_area_info info;

 	/* requested length too big for entire address space */
-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mm->context.addr_limit - mmap_min_addr)
 		return -ENOMEM;

 	if (flags & MAP_FIXED)
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
@@ -164,7 +164,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
-		info.high_limit = TASK_SIZE;
+		info.high_limit = mm->context.addr_limit;
 		addr = vm_unmapped_area(&info);
 	}
arch/powerpc/mm/slice.c
@@ -277,7 +277,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 	info.align_offset = 0;

 	addr = TASK_UNMAPPED_BASE;
-	while (addr < TASK_SIZE) {
+	while (addr < mm->context.addr_limit) {
 		info.low_limit = addr;
 		if (!slice_scan_available(addr, available, 1, &addr))
 			continue;
@@ -289,8 +289,8 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 		 * Check if we need to reduce the range, or if we can
 		 * extend it to cover the next available slice.
 		 */
-		if (addr >= TASK_SIZE)
-			addr = TASK_SIZE;
+		if (addr >= mm->context.addr_limit)
+			addr = mm->context.addr_limit;
 		else if (slice_scan_available(addr, available, 1, &next_end)) {
 			addr = next_end;
 			goto next_slice;
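
The slice.c hunks above walk the address space bottom-up in slice-sized steps, extending over consecutive available slices and clamping the scan at the (now per-mm) address limit. The toy model below shows the same clamped bottom-up scan over a made-up availability table; the slice size, array, and helper are invented for illustration and are not the kernel's data structures:

	/*
	 * Toy model of a clamped bottom-up scan: find the first free run of
	 * `len` bytes below `addr_limit`. SLICE_SIZE, avail[] and the logic
	 * are illustrative stand-ins, not kernel code.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define SLICE_SIZE	(256UL << 20)	/* pretend slices are 256MB */
	#define NUM_SLICES	16

	static bool avail[NUM_SLICES] = {
		[2] = true, [3] = true, [4] = true, [9] = true,
	};

	static unsigned long find_bottomup(unsigned long len, unsigned long addr_limit)
	{
		unsigned long addr = 0;

		while (addr + len <= addr_limit) {
			unsigned long end = addr;

			/* extend over consecutive available slices, clamped at the limit */
			while (end < addr_limit && avail[end / SLICE_SIZE])
				end += SLICE_SIZE;
			if (end > addr_limit)
				end = addr_limit;

			if (end - addr >= len)
				return addr;		/* big enough gap found */

			addr = end + SLICE_SIZE;	/* skip the unavailable slice */
		}
		return (unsigned long)-1;		/* -ENOMEM in the real code */
	}

	int main(void)
	{
		unsigned long limit = (unsigned long)NUM_SLICES * SLICE_SIZE;
		unsigned long addr = find_bottomup(2 * SLICE_SIZE, limit);

		printf("found at slice %lu\n", addr / SLICE_SIZE);
		return 0;
	}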
arch/powerpc/mm/subpage-prot.c
@@ -197,7 +197,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
 	/* Check parameters */
 	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
-	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
+	    addr >= mm->context.addr_limit || len >= mm->context.addr_limit ||
+	    addr + len > mm->context.addr_limit)
 		return -EINVAL;

 	if (is_hugepage_only_range(mm, addr, len))
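
One detail worth noting across these hunks: the bounds checks are written as limit - len >= addr rather than addr + len <= limit. Each function first rejects len > limit, so the subtraction cannot underflow, whereas addr + len can wrap around for a hostile addr and make the naive form pass. A minimal standalone demonstration (values invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned long limit = 0x1000;
		unsigned long len   = 0x100;
		unsigned long addr  = (unsigned long)-16;	/* near-wraparound address */

		/* addr + len wraps to a small value, so this check wrongly passes */
		printf("addr + len <= limit : %d (unsafe)\n", addr + len <= limit);

		/* limit - len cannot underflow once len <= limit is known */
		printf("limit - len >= addr : %d (safe)\n", limit - len >= addr);
		return 0;
	}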