Commit b2df3f60 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/vdso: Simplify arch_setup_additional_pages() exit

To simplify arch_setup_additional_pages() exit, rename
it __arch_setup_additional_pages() and create a caller
arch_setup_additional_pages() which does the locking.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/603c1d039d3f928ee95e547fcd2219fcf4c3b514.1601197618.git.christophe.leroy@csgroup.eu
parent 7461a4f7
...@@ -122,7 +122,7 @@ struct lib64_elfinfo ...@@ -122,7 +122,7 @@ struct lib64_elfinfo
* This is called from binfmt_elf, we create the special vma for the * This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree * vDSO and insert it into the mm struct tree
*/ */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct page **vdso_pagelist; struct page **vdso_pagelist;
...@@ -130,9 +130,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -130,9 +130,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long vdso_base; unsigned long vdso_base;
int rc; int rc;
if (!vdso_ready)
return 0;
if (is_32bit_task()) { if (is_32bit_task()) {
vdso_pagelist = vdso32_pagelist; vdso_pagelist = vdso32_pagelist;
vdso_size = &vdso32_end - &vdso32_start; vdso_size = &vdso32_end - &vdso32_start;
...@@ -148,8 +145,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -148,8 +145,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_base = 0; vdso_base = 0;
} }
current->mm->context.vdso_base = 0;
/* Add a page to the vdso size for the data page */ /* Add a page to the vdso size for the data page */
vdso_size += PAGE_SIZE; vdso_size += PAGE_SIZE;
...@@ -159,15 +154,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -159,15 +154,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* and end up putting it elsewhere. * and end up putting it elsewhere.
* Add enough to the size so that the result can be aligned. * Add enough to the size so that the result can be aligned.
*/ */
if (mmap_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, vdso_base, vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
0, 0); 0, 0);
if (IS_ERR_VALUE(vdso_base)) { if (IS_ERR_VALUE(vdso_base))
rc = vdso_base; return vdso_base;
goto fail_mmapsem;
}
/* Add required alignment. */ /* Add required alignment. */
vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
...@@ -193,15 +184,26 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -193,15 +184,26 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
VM_READ|VM_EXEC| VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_pagelist); vdso_pagelist);
if (rc) { return rc;
current->mm->context.vdso_base = 0; }
goto fail_mmapsem;
}
mmap_write_unlock(mm); int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return 0; {
struct mm_struct *mm = current->mm;
int rc;
mm->context.vdso_base = 0;
if (!vdso_ready)
return 0;
if (mmap_write_lock_killable(mm))
return -EINTR;
rc = __arch_setup_additional_pages(bprm, uses_interp);
if (rc)
mm->context.vdso_base = 0;
fail_mmapsem:
mmap_write_unlock(mm); mmap_write_unlock(mm);
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment