Commit e3975891 authored by Chen Gang, committed by Linus Torvalds

mm/mmap.c: simplify the failure return working flow

__split_vma() doesn't need out_err label, neither need initializing err.

copy_vma() can return NULL directly when kmem_cache_alloc() fails.
Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 44a30220
...@@ -2455,7 +2455,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2455,7 +2455,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below) unsigned long addr, int new_below)
{ {
struct vm_area_struct *new; struct vm_area_struct *new;
int err = -ENOMEM; int err;
if (is_vm_hugetlb_page(vma) && (addr & if (is_vm_hugetlb_page(vma) && (addr &
~(huge_page_mask(hstate_vma(vma))))) ~(huge_page_mask(hstate_vma(vma)))))
...@@ -2463,7 +2463,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2463,7 +2463,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new) if (!new)
goto out_err; return -ENOMEM;
/* most fields are the same, copy all, and then fixup */ /* most fields are the same, copy all, and then fixup */
*new = *vma; *new = *vma;
...@@ -2511,7 +2511,6 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2511,7 +2511,6 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
mpol_put(vma_policy(new)); mpol_put(vma_policy(new));
out_free_vma: out_free_vma:
kmem_cache_free(vm_area_cachep, new); kmem_cache_free(vm_area_cachep, new);
out_err:
return err; return err;
} }
...@@ -2952,30 +2951,31 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, ...@@ -2952,30 +2951,31 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
} else { } else {
new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (new_vma) { if (!new_vma)
*new_vma = *vma; goto out;
new_vma->vm_start = addr; *new_vma = *vma;
new_vma->vm_end = addr + len; new_vma->vm_start = addr;
new_vma->vm_pgoff = pgoff; new_vma->vm_end = addr + len;
if (vma_dup_policy(vma, new_vma)) new_vma->vm_pgoff = pgoff;
goto out_free_vma; if (vma_dup_policy(vma, new_vma))
INIT_LIST_HEAD(&new_vma->anon_vma_chain); goto out_free_vma;
if (anon_vma_clone(new_vma, vma)) INIT_LIST_HEAD(&new_vma->anon_vma_chain);
goto out_free_mempol; if (anon_vma_clone(new_vma, vma))
if (new_vma->vm_file) goto out_free_mempol;
get_file(new_vma->vm_file); if (new_vma->vm_file)
if (new_vma->vm_ops && new_vma->vm_ops->open) get_file(new_vma->vm_file);
new_vma->vm_ops->open(new_vma); if (new_vma->vm_ops && new_vma->vm_ops->open)
vma_link(mm, new_vma, prev, rb_link, rb_parent); new_vma->vm_ops->open(new_vma);
*need_rmap_locks = false; vma_link(mm, new_vma, prev, rb_link, rb_parent);
} *need_rmap_locks = false;
} }
return new_vma; return new_vma;
out_free_mempol: out_free_mempol:
mpol_put(vma_policy(new_vma)); mpol_put(vma_policy(new_vma));
out_free_vma: out_free_vma:
kmem_cache_free(vm_area_cachep, new_vma); kmem_cache_free(vm_area_cachep, new_vma);
out:
return NULL; return NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment