Commit 09357814 authored by Oleg Nesterov, committed by Linus Torvalds

mm: add the "struct mm_struct *mm" local into

Cosmetic, but expand_upwards() and expand_downwards() overuse vma->vm_mm;
a local variable makes sense, imho.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 87e8827b
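
In short, the patch caches vma->vm_mm in a local variable at the top of each
function and uses that local for every later dereference. A minimal sketch of
the pattern, condensed from the diff below (the "..." elides unchanged code):

	/* before */
	int expand_upwards(struct vm_area_struct *vma, unsigned long address)
	{
		int error;
		...
		spin_lock(&vma->vm_mm->page_table_lock);
		...
		validate_mm(vma->vm_mm);
		return error;
	}

	/* after */
	int expand_upwards(struct vm_area_struct *vma, unsigned long address)
	{
		struct mm_struct *mm = vma->vm_mm;	/* cache the pointer once */
		int error;
		...
		spin_lock(&mm->page_table_lock);
		...
		validate_mm(mm);
		return error;
	}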
mm/mmap.c
@@ -2148,6 +2148,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
  */
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int error;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
@@ -2197,10 +2198,10 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		 * So, we reuse mm->page_table_lock to guard
 		 * against concurrent vma expansions.
 		 */
-		spin_lock(&vma->vm_mm->page_table_lock);
+		spin_lock(&mm->page_table_lock);
 		if (vma->vm_flags & VM_LOCKED)
-			vma->vm_mm->locked_vm += grow;
-		vm_stat_account(vma->vm_mm, vma->vm_flags,
+			mm->locked_vm += grow;
+		vm_stat_account(mm, vma->vm_flags,
 				vma->vm_file, grow);
 		anon_vma_interval_tree_pre_update_vma(vma);
 		vma->vm_end = address;
@@ -2208,8 +2209,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		if (vma->vm_next)
 			vma_gap_update(vma->vm_next);
 		else
-			vma->vm_mm->highest_vm_end = address;
-		spin_unlock(&vma->vm_mm->page_table_lock);
+			mm->highest_vm_end = address;
+		spin_unlock(&mm->page_table_lock);
 
 		perf_event_mmap(vma);
 	}
@@ -2217,7 +2218,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
-	validate_mm(vma->vm_mm);
+	validate_mm(mm);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2228,6 +2229,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int error;
 
 	/*
@@ -2272,17 +2274,17 @@ int expand_downwards(struct vm_area_struct *vma,
 		 * So, we reuse mm->page_table_lock to guard
 		 * against concurrent vma expansions.
 		 */
-		spin_lock(&vma->vm_mm->page_table_lock);
+		spin_lock(&mm->page_table_lock);
 		if (vma->vm_flags & VM_LOCKED)
-			vma->vm_mm->locked_vm += grow;
-		vm_stat_account(vma->vm_mm, vma->vm_flags,
+			mm->locked_vm += grow;
+		vm_stat_account(mm, vma->vm_flags,
 				vma->vm_file, grow);
 		anon_vma_interval_tree_pre_update_vma(vma);
 		vma->vm_start = address;
 		vma->vm_pgoff -= grow;
 		anon_vma_interval_tree_post_update_vma(vma);
 		vma_gap_update(vma);
-		spin_unlock(&vma->vm_mm->page_table_lock);
+		spin_unlock(&mm->page_table_lock);
 
 		perf_event_mmap(vma);
 	}
@@ -2290,7 +2292,7 @@ int expand_downwards(struct vm_area_struct *vma,
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
-	validate_mm(vma->vm_mm);
+	validate_mm(mm);
 	return error;
 }
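
For readers outside the kernel tree, the same refactoring in a self-contained
toy program (hypothetical struct and field names chosen to echo the kernel's,
not the real definitions):

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel structures, for illustration only. */
	struct mm_struct { long locked_vm; };
	struct vm_area_struct { struct mm_struct *vm_mm; };

	/* Before: every access repeats the vma->vm_mm hop. */
	static void grow_verbose(struct vm_area_struct *vma, long grow)
	{
		vma->vm_mm->locked_vm += grow;
		printf("locked_vm = %ld\n", vma->vm_mm->locked_vm);
	}

	/* After: cache the pointer once, as the patch does. */
	static void grow_with_local(struct vm_area_struct *vma, long grow)
	{
		struct mm_struct *mm = vma->vm_mm;

		mm->locked_vm += grow;
		printf("locked_vm = %ld\n", mm->locked_vm);
	}

	int main(void)
	{
		struct mm_struct mm = { .locked_vm = 0 };
		struct vm_area_struct vma = { .vm_mm = &mm };

		grow_verbose(&vma, 2);		/* prints locked_vm = 2 */
		grow_with_local(&vma, 3);	/* prints locked_vm = 5 */
		return 0;
	}

vm_mm is only read, never written, in these functions, so behavior is
identical either way; the local just shortens the expressions, which is why
the commit message calls the change cosmetic.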