Commit 17f1ae9b authored by Liam R. Howlett, committed by Andrew Morton

mm/vma: change munmap to use vma_munmap_struct() for accounting and surrounding vmas

Clean up the code by changing the munmap operation to use a structure for
the accounting and munmap variables.

Since remove_mt() is only called in one location and its contents will be
reduced to almost nothing, the remains of the function can be added to
vms_complete_munmap_vmas().

Link: https://lkml.kernel.org/r/20240830040101.822209-7-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent dba14840
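
The pattern the patch adopts can be shown in isolation. Below is a standalone sketch (invented names and flag values, not kernel code): the gather phase tallies each VMA's pages into one struct of counters, so the completion phase can apply the totals in a single place instead of re-walking the removed VMAs.

#include <stdio.h>

/* Invented flag bits for this sketch; the kernel's VM_LOCKED and
 * VM_ACCOUNT are different values. */
#define SK_VM_LOCKED	0x1UL
#define SK_VM_ACCOUNT	0x2UL

/* Running totals, playing the role of vma_munmap_struct's counters. */
struct sk_totals {
	unsigned long nr_pages;		/* all pages in the unmapped range */
	unsigned long locked_vm;	/* pages held in locked mappings */
	unsigned long nr_accounted;	/* pages charged to the commit limit */
};

/* Gather phase: tally one VMA's pages while walking the range. */
static void sk_gather(struct sk_totals *t, unsigned long nrpages,
		      unsigned long flags)
{
	t->nr_pages += nrpages;
	if (flags & SK_VM_LOCKED)
		t->locked_vm += nrpages;
	if (flags & SK_VM_ACCOUNT)
		t->nr_accounted += nrpages;
}

int main(void)
{
	struct sk_totals t = { 0, 0, 0 };

	sk_gather(&t, 16, SK_VM_LOCKED);	/* a locked 16-page VMA */
	sk_gather(&t, 8, SK_VM_ACCOUNT);	/* an accounted 8-page VMA */

	/* Completion phase: one bulk update instead of per-VMA updates. */
	printf("%lu pages, %lu locked, %lu accounted\n",
	       t.nr_pages, t.locked_vm, t.nr_accounted);
	return 0;
}

Since the gather walk already visits every VMA in the range, folding the accounting into it removes the extra pass that remove_mt() used to make over the detached tree.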
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -103,7 +103,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->unlock = unlock;
 	vms->uf = uf;
 	vms->vma_count = 0;
-	vms->nr_pages = vms->locked_vm = 0;
+	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 }
 
 /*
@@ -299,30 +300,6 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	return __split_vma(vmi, vma, addr, new_below);
 }
 
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
-	unsigned long nr_accounted = 0;
-	struct vm_area_struct *vma;
-
-	/* Update high watermark before we lower total_vm */
-	update_hiwater_vm(mm);
-	mas_for_each(mas, vma, ULONG_MAX) {
-		long nrpages = vma_pages(vma);
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			nr_accounted += nrpages;
-		vm_stat_account(mm, vma->vm_flags, -nrpages);
-		remove_vma(vma, false);
-	}
-	vm_unacct_memory(nr_accounted);
-}
-
 /*
  * init_vma_prep() - Initializer wrapper for vma_prepare struct
  * @vp: The vma_prepare struct
@@ -722,7 +699,7 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach)
 {
-	struct vm_area_struct *prev, *next;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 
 	mm = vms->mm;
@@ -731,21 +708,31 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	if (vms->unlock)
 		mmap_write_downgrade(mm);
 
-	prev = vma_iter_prev_range(vms->vmi);
-	next = vma_next(vms->vmi);
-	if (next)
-		vma_iter_prev_range(vms->vmi);
-
 	/*
 	 * We can free page tables without write-locking mmap_lock because VMAs
 	 * were isolated before we downgraded mmap_lock.
 	 */
 	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
-		     vms->vma_count, !vms->unlock);
-	/* Statistics and freeing VMAs */
+	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
+		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
+	/* Stat accounting */
+	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+	/* Paranoid bookkeeping */
+	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
+	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
+	VM_WARN_ON(vms->data_vm > mm->data_vm);
+	mm->exec_vm -= vms->exec_vm;
+	mm->stack_vm -= vms->stack_vm;
+	mm->data_vm -= vms->data_vm;
+
+	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
-	remove_mt(mm, mas_detach);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		remove_vma(vma, false);
+
+	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
 		mmap_read_unlock(mm);
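
Note the ordering in the hunk above: update_hiwater_vm() must sample total_vm while it still includes the pages about to be removed, so it runs before the WRITE_ONCE() that lowers total_vm; the READ_ONCE()/WRITE_ONCE() pair is there because total_vm can be read without the mmap lock (e.g. via /proc), so the update must not tear. A standalone sketch of the high-watermark rule, with plain variables standing in for the mm_struct fields:

/* Record the peak before lowering the total; subtracting first would
 * under-report the peak by nr_pages. */
static void sk_update_hiwater(unsigned long *hiwater_vm,
			      unsigned long total_vm)
{
	if (*hiwater_vm < total_vm)
		*hiwater_vm = total_vm;
}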
@@ -798,18 +785,19 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
 			goto start_split_failed;
 	}
+	vms->prev = vma_prev(vms->vmi);
 
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
 	 * it is always overwritten.
 	 */
-	next = vms->vma;
-	do {
+	for_each_vma_range(*(vms->vmi), next, vms->end) {
+		long nrpages;
+
 		if (!can_modify_vma(next)) {
 			error = -EPERM;
 			goto modify_vma_failed;
 		}
 
 		/* Does it split the end? */
 		if (next->vm_end > vms->end) {
			if (__split_vma(vms->vmi, next, vms->end, 0))
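
For reference, for_each_vma_range() is a thin while loop around vma_find(); its definition in include/linux/mm.h is essentially:

#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

The removed do { } for_each_vma_range() construct was a do-while that processed vms->vma once before iterating. The plain loop works here because the vma_prev() call just added leaves the iterator positioned before the first VMA in the range, so the first vma_find() returns vms->vma again and no VMA is skipped.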
@@ -821,8 +809,21 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 			goto munmap_gather_failed;
 
 		vma_mark_detached(next, true);
+		nrpages = vma_pages(next);
+		vms->nr_pages += nrpages;
+
 		if (next->vm_flags & VM_LOCKED)
-			vms->locked_vm += vma_pages(next);
+			vms->locked_vm += nrpages;
+
+		if (next->vm_flags & VM_ACCOUNT)
+			vms->nr_accounted += nrpages;
+
+		if (is_exec_mapping(next->vm_flags))
+			vms->exec_vm += nrpages;
+		else if (is_stack_mapping(next->vm_flags))
+			vms->stack_vm += nrpages;
+		else if (is_data_mapping(next->vm_flags))
+			vms->data_vm += nrpages;
 
 		if (unlikely(vms->uf)) {
 			/*
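
The exec/stack/data split added above mirrors the classification that vm_stat_account() applied per VMA in the removed remove_mt() path; the three tests are mutually exclusive, so each page lands in exactly one counter. A standalone sketch of that kind of classification, patterned after the kernel's is_exec_mapping()/is_stack_mapping()/is_data_mapping() helpers but with invented flag bits:

/* Invented flag bits for this sketch. */
#define SK_VM_WRITE	0x1UL
#define SK_VM_EXEC	0x2UL
#define SK_VM_SHARED	0x4UL
#define SK_VM_STACK	0x8UL

enum sk_kind { SK_EXEC, SK_STACK, SK_DATA, SK_OTHER };

/* First match wins, as in the if/else-if chain in the gather loop. */
static enum sk_kind sk_classify(unsigned long flags)
{
	if ((flags & (SK_VM_EXEC | SK_VM_WRITE | SK_VM_STACK)) == SK_VM_EXEC)
		return SK_EXEC;		/* executable and not writable */
	if (flags & SK_VM_STACK)
		return SK_STACK;	/* stack-style mapping */
	if ((flags & (SK_VM_WRITE | SK_VM_SHARED | SK_VM_STACK)) == SK_VM_WRITE)
		return SK_DATA;		/* private writable data */
	return SK_OTHER;
}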
@@ -842,7 +843,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		BUG_ON(next->vm_start < vms->start);
 		BUG_ON(next->vm_start > vms->end);
 #endif
-	} for_each_vma_range(*(vms->vmi), next, vms->end);
+	}
+
+	vms->next = vma_next(vms->vmi);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -33,12 +33,18 @@ struct vma_munmap_struct {
 	struct vma_iterator *vmi;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;	/* The first vma to munmap */
+	struct vm_area_struct *prev;	/* vma before the munmap area */
+	struct vm_area_struct *next;	/* vma after the munmap area */
 	struct list_head *uf;		/* Userfaultfd list_head */
 	unsigned long start;		/* Aligned start addr (inclusive) */
 	unsigned long end;		/* Aligned end addr (exclusive) */
 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
+	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
+	unsigned long exec_vm;
+	unsigned long stack_vm;
+	unsigned long data_vm;
 	bool unlock;			/* Unlock after the munmap */
 };
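
With the two new pointers and four new counters, the struct now carries everything both phases need. In the caller (do_vmi_align_munmap() in this series) the flow is, in outline, with error paths and maple-tree preallocation omitted and the label name invented:

struct vma_munmap_struct vms;

init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
error = vms_gather_munmap_vmas(&vms, &mas_detach);
if (error)
	goto gather_failed;	/* illustrative label */

/* ... detach the gathered range from the mm's maple tree ... */

vms_complete_munmap_vmas(&vms, &mas_detach);

vms_gather_munmap_vmas() fills vms->prev, vms->next, and the counters while splitting and detaching; vms_complete_munmap_vmas() then consumes them for unmap_region(), the stat updates, and the final vm_unacct_memory() call.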