Commit fdbef614 authored by Dmitry Safonov, committed by Linus Torvalds

mm/mremap: don't account pages in vma_to_resize()

All this vm_unacct_memory(charged) dance seems to complicate the code
without a good reason.  Furthermore, it seems not always done right on
error paths in mremap_to().  And worse than that: this `charged'
difference is sometimes double-accounted for growing MREMAP_DONTUNMAP
mremap()s in move_vma():

	if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))

Let's not do this.  Account memory in mremap() fast-path for growing
VMAs or in move_vma() for actually moving things.  The same simpler way
as it's done by vm_stat_account(), but with a difference to call
security_vm_enough_memory_mm() before copying/adjusting VMA.

Originally noticed by Chen Wandun:
https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com

Link: https://lkml.kernel.org/r/20210721131320.522061-1-dima@arista.com
Fixes: e346b381 ("mm/mremap: add MREMAP_DONTUNMAP to mremap()")
Signed-off-by: Dmitry Safonov <dima@arista.com>
Acked-by: Brian Geffon <bgeffon@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Wandun <chenwandun@huawei.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yongjun <weiyongjun1@huawei.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6af5fa0d
...@@ -565,6 +565,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -565,6 +565,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
bool *locked, unsigned long flags, bool *locked, unsigned long flags,
struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{ {
long to_account = new_len - old_len;
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma; struct vm_area_struct *new_vma;
unsigned long vm_flags = vma->vm_flags; unsigned long vm_flags = vma->vm_flags;
...@@ -583,6 +584,9 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -583,6 +584,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (mm->map_count >= sysctl_max_map_count - 3) if (mm->map_count >= sysctl_max_map_count - 3)
return -ENOMEM; return -ENOMEM;
if (unlikely(flags & MREMAP_DONTUNMAP))
to_account = new_len;
if (vma->vm_ops && vma->vm_ops->may_split) { if (vma->vm_ops && vma->vm_ops->may_split) {
if (vma->vm_start != old_addr) if (vma->vm_start != old_addr)
err = vma->vm_ops->may_split(vma, old_addr); err = vma->vm_ops->may_split(vma, old_addr);
...@@ -604,8 +608,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -604,8 +608,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (err) if (err)
return err; return err;
if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) { if (vm_flags & VM_ACCOUNT) {
if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT)) if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
return -ENOMEM; return -ENOMEM;
} }
...@@ -613,8 +617,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -613,8 +617,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
&need_rmap_locks); &need_rmap_locks);
if (!new_vma) { if (!new_vma) {
if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) if (vm_flags & VM_ACCOUNT)
vm_unacct_memory(new_len >> PAGE_SHIFT); vm_unacct_memory(to_account >> PAGE_SHIFT);
return -ENOMEM; return -ENOMEM;
} }
...@@ -708,8 +712,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -708,8 +712,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
} }
static struct vm_area_struct *vma_to_resize(unsigned long addr, static struct vm_area_struct *vma_to_resize(unsigned long addr,
unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long old_len, unsigned long new_len, unsigned long flags)
unsigned long *p)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
...@@ -768,13 +771,6 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, ...@@ -768,13 +771,6 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
(new_len - old_len) >> PAGE_SHIFT)) (new_len - old_len) >> PAGE_SHIFT))
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
if (vma->vm_flags & VM_ACCOUNT) {
unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(mm, charged))
return ERR_PTR(-ENOMEM);
*p = charged;
}
return vma; return vma;
} }
...@@ -787,7 +783,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, ...@@ -787,7 +783,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long ret = -EINVAL; unsigned long ret = -EINVAL;
unsigned long charged = 0;
unsigned long map_flags = 0; unsigned long map_flags = 0;
if (offset_in_page(new_addr)) if (offset_in_page(new_addr))
...@@ -830,7 +825,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, ...@@ -830,7 +825,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
old_len = new_len; old_len = new_len;
} }
vma = vma_to_resize(addr, old_len, new_len, flags, &charged); vma = vma_to_resize(addr, old_len, new_len, flags);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto out; goto out;
...@@ -853,7 +848,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, ...@@ -853,7 +848,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
((addr - vma->vm_start) >> PAGE_SHIFT), ((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags); map_flags);
if (IS_ERR_VALUE(ret)) if (IS_ERR_VALUE(ret))
goto out1; goto out;
/* We got a new mapping */ /* We got a new mapping */
if (!(flags & MREMAP_FIXED)) if (!(flags & MREMAP_FIXED))
...@@ -862,12 +857,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, ...@@ -862,12 +857,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
uf_unmap); uf_unmap);
if (!(offset_in_page(ret)))
goto out;
out1:
vm_unacct_memory(charged);
out: out:
return ret; return ret;
} }
...@@ -899,7 +888,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, ...@@ -899,7 +888,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long ret = -EINVAL; unsigned long ret = -EINVAL;
unsigned long charged = 0;
bool locked = false; bool locked = false;
bool downgraded = false; bool downgraded = false;
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
...@@ -981,7 +969,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, ...@@ -981,7 +969,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
/* /*
* Ok, we need to grow.. * Ok, we need to grow..
*/ */
vma = vma_to_resize(addr, old_len, new_len, flags, &charged); vma = vma_to_resize(addr, old_len, new_len, flags);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto out; goto out;
...@@ -992,10 +980,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, ...@@ -992,10 +980,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (old_len == vma->vm_end - addr) { if (old_len == vma->vm_end - addr) {
/* can we just expand the current mapping? */ /* can we just expand the current mapping? */
if (vma_expandable(vma, new_len - old_len)) { if (vma_expandable(vma, new_len - old_len)) {
int pages = (new_len - old_len) >> PAGE_SHIFT; long pages = (new_len - old_len) >> PAGE_SHIFT;
if (vma->vm_flags & VM_ACCOUNT) {
if (security_vm_enough_memory_mm(mm, pages)) {
ret = -ENOMEM;
goto out;
}
}
if (vma_adjust(vma, vma->vm_start, addr + new_len, if (vma_adjust(vma, vma->vm_start, addr + new_len,
vma->vm_pgoff, NULL)) { vma->vm_pgoff, NULL)) {
vm_unacct_memory(pages);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -1034,10 +1030,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, ...@@ -1034,10 +1030,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
&locked, flags, &uf, &uf_unmap); &locked, flags, &uf, &uf_unmap);
} }
out: out:
if (offset_in_page(ret)) { if (offset_in_page(ret))
vm_unacct_memory(charged);
locked = false; locked = false;
}
if (downgraded) if (downgraded)
mmap_read_unlock(current->mm); mmap_read_unlock(current->mm);
else else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment