Commit 601255ae authored by Will Deacon, committed by Catalin Marinas

arm64: vdso: move data page before code pages

Andy pointed out that binutils generates additional sections in the vdso
image (e.g. section string table) which, if our .text section gets big
enough, could cross a page boundary and end up screwing up the location
where the kernel expects to put the data page.

This patch solves the issue in the same manner as x86_32, by moving the
data page before the code pages.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 2fea7f6c
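
The approach is easiest to see as a condensed sketch of the mapping order this patch establishes (illustrative only, not the literal kernel code: the function name is made up, and the unmapped-area lookup, locking and cleanup paths of the real arch_setup_additional_pages() are omitted):

/*
 * Sketch: map the vDSO data page first, then the code pages directly
 * above it, so the data page sits at a fixed -PAGE_SIZE offset from the
 * code no matter how large the generated vDSO image gets.
 */
static int map_vdso_sketch(struct mm_struct *mm, unsigned long vdso_base,
			   unsigned long vdso_text_len)
{
	void *ret;

	/* Data page ("[vvar]"): one read-only page at the mapping base. */
	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       &vdso_spec[0]);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	/* Code pages ("[vdso]"): start exactly one page above the data. */
	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_spec[1]);
	return IS_ERR(ret) ? PTR_ERR(ret) : 0;
}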
arch/arm64/kernel/vdso.c

@@ -121,8 +121,8 @@ static int __init vdso_init(void)
 	}
 
 	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
 	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -130,22 +130,22 @@ static int __init vdso_init(void)
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = virt_to_page(vdso_data);
+
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
-
-	/* Grab the vDSO data page. */
-	vdso_pagelist[i] = virt_to_page(vdso_data);
+		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
 	/* Populate the special mapping structures */
 	vdso_spec[0] = (struct vm_special_mapping) {
-		.name	= "[vdso]",
+		.name	= "[vvar]",
 		.pages	= vdso_pagelist,
 	};
 
 	vdso_spec[1] = (struct vm_special_mapping) {
-		.name	= "[vvar]",
-		.pages	= vdso_pagelist + vdso_pages,
+		.name	= "[vdso]",
+		.pages	= &vdso_pagelist[1],
 	};
 
 	return 0;
@@ -169,22 +169,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
-	mm->context.vdso = (void *)vdso_base;
-
-	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-				       VM_READ|VM_EXEC|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
 				       &vdso_spec[0]);
 	if (IS_ERR(ret))
 		goto up_fail;
 
-	vdso_base += vdso_text_len;
-	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				       &vdso_spec[1]);
 	if (IS_ERR(ret))
 		goto up_fail;
 
 	up_write(&mm->mmap_sem);
 	return 0;
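
After this change, vdso_pagelist[0] holds the data page and vdso_pagelist[1] through vdso_pagelist[vdso_pages] hold the code pages, which is why vdso_spec[1].pages now starts at &vdso_pagelist[1]. The "[vvar]" and "[vdso]" names also swap between the two vm_special_mapping entries so that spec 0, installed first at the lowest address, is the data page and spec 1 is the code. In arch_setup_additional_pages(), mm->context.vdso consequently points at the start of the code mapping rather than at the base of the whole region.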
arch/arm64/kernel/vdso/vdso.lds.S

@@ -28,6 +28,7 @@ OUTPUT_ARCH(aarch64)
 SECTIONS
 {
+	PROVIDE(_vdso_data = . - PAGE_SIZE);
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
 	.hash		: { *(.hash) }			:text
@@ -57,9 +58,6 @@ SECTIONS
 	_end = .;
 	PROVIDE(end = .);
 
-	. = ALIGN(PAGE_SIZE);
-	PROVIDE(_vdso_data = .);
-
 	/DISCARD/	: {
 		*(.note.GNU-stack)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
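
A quick way to observe the resulting layout from userspace (illustrative only, not part of the patch; assumes an arm64 kernel with this change applied) is to print the two named mappings from /proc/self/maps, where the [vvar] data page should appear immediately below the [vdso] code mapping:

/* Print the [vvar] and [vdso] lines from this process's memory map. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[vvar]") || strstr(line, "[vdso]"))
			fputs(line, stdout);
	fclose(maps);
	return 0;
}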