Commit 0d747f65 authored by Vincenzo Frascino, committed by Will Deacon

arm64: compat: Alloc separate pages for vectors and sigpage

For AArch32 tasks, we install a special "[vectors]" page that contains
the sigreturn trampolines and kuser helpers, which is mapped at a fixed
address specified by the kuser helpers ABI.

Having the sigreturn trampolines in the same page as the kuser helpers
makes it impossible to disable the kuser helpers independently.

Follow the Arm implementation, by moving the signal trampolines out of
the "[vectors]" page and into their own "[sigpage]".

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
[will: tweaked comments and fixed sparse warning]
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 36a2ba07
...@@ -214,10 +214,10 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; ...@@ -214,10 +214,10 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
set_thread_flag(TIF_32BIT); \ set_thread_flag(TIF_32BIT); \
}) })
#define COMPAT_ARCH_DLINFO #define COMPAT_ARCH_DLINFO
extern int aarch32_setup_vectors_page(struct linux_binprm *bprm, extern int aarch32_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp); int uses_interp);
#define compat_arch_setup_additional_pages \ #define compat_arch_setup_additional_pages \
aarch32_setup_vectors_page aarch32_setup_additional_pages
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
......
...@@ -20,8 +20,6 @@ ...@@ -20,8 +20,6 @@
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#include <linux/compat.h> #include <linux/compat.h>
#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs); struct pt_regs *regs);
int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
......
...@@ -403,8 +403,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, ...@@ -403,8 +403,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_SIGINFO) if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3; idx += 3;
retcode = AARCH32_VECTORS_BASE + retcode = (unsigned long)current->mm->context.vdso +
AARCH32_KERN_SIGRET_CODE_OFFSET +
(idx << 2) + thumb; (idx << 2) + thumb;
} }
......
/* /*
* VDSO implementation for AArch64 and vector page setup for AArch32. * VDSO implementations.
* *
* Copyright (C) 2012 ARM Limited * Copyright (C) 2012 ARM Limited
* *
...@@ -53,61 +53,123 @@ struct vdso_data *vdso_data = &vdso_data_store.data; ...@@ -53,61 +53,123 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
/* /*
* Create and map the vectors page for AArch32 tasks. * Create and map the vectors page for AArch32 tasks.
*/ */
static struct page *vectors_page[1] __ro_after_init; #define C_VECTORS 0
#define C_SIGPAGE 1
#define C_PAGES (C_SIGPAGE + 1)
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
{
.name = "[vectors]", /* ABI */
.pages = &aarch32_vdso_pages[C_VECTORS],
},
{
.name = "[sigpage]", /* ABI */
.pages = &aarch32_vdso_pages[C_SIGPAGE],
},
};
static int __init alloc_vectors_page(void) static int __init aarch32_alloc_vdso_pages(void)
{ {
extern char __kuser_helper_start[], __kuser_helper_end[]; extern char __kuser_helper_start[], __kuser_helper_end[];
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start; int kuser_sz = __kuser_helper_end - __kuser_helper_start;
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage; unsigned long vdso_pages[C_PAGES];
vpage = get_zeroed_page(GFP_ATOMIC); vdso_pages[C_VECTORS] = get_zeroed_page(GFP_ATOMIC);
if (!vdso_pages[C_VECTORS])
return -ENOMEM;
if (!vpage) vdso_pages[C_SIGPAGE] = get_zeroed_page(GFP_ATOMIC);
if (!vdso_pages[C_SIGPAGE]) {
free_page(vdso_pages[C_VECTORS]);
return -ENOMEM; return -ENOMEM;
}
/* kuser helpers */ /* kuser helpers */
memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start, memcpy((void *)(vdso_pages[C_VECTORS] + 0x1000 - kuser_sz),
kuser_sz); __kuser_helper_start,
kuser_sz);
/* sigreturn code */ /* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET, memcpy((void *)vdso_pages[C_SIGPAGE], __aarch32_sigret_code_start,
__aarch32_sigret_code_start, sigret_sz); sigret_sz);
flush_icache_range(vdso_pages[C_VECTORS],
vdso_pages[C_VECTORS] + PAGE_SIZE);
flush_icache_range(vdso_pages[C_SIGPAGE],
vdso_pages[C_SIGPAGE] + PAGE_SIZE);
flush_icache_range(vpage, vpage + PAGE_SIZE); aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_pages[C_VECTORS]);
vectors_page[0] = virt_to_page(vpage); aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_pages[C_SIGPAGE]);
return 0; return 0;
} }
arch_initcall(alloc_vectors_page); arch_initcall(aarch32_alloc_vdso_pages);
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{ {
struct mm_struct *mm = current->mm; void *ret;
unsigned long addr = AARCH32_VECTORS_BASE;
static const struct vm_special_mapping spec = { /*
.name = "[vectors]", * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
.pages = vectors_page, * not safe to CoW the page containing the CPU exception vectors.
*/
ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYEXEC,
&aarch32_vdso_spec[C_VECTORS]);
return PTR_ERR_OR_ZERO(ret);
}
}; static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
unsigned long addr;
void *ret; void *ret;
if (down_write_killable(&mm->mmap_sem)) addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
return -EINTR; if (IS_ERR_VALUE(addr)) {
current->mm->context.vdso = (void *)addr; ret = ERR_PTR(addr);
goto out;
}
/* Map vectors page at the high address. */ /*
* VM_MAYWRITE is required to allow gdb to Copy-on-Write and
* set breakpoints.
*/
ret = _install_special_mapping(mm, addr, PAGE_SIZE, ret = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, VM_READ | VM_EXEC | VM_MAYREAD |
&spec); VM_MAYWRITE | VM_MAYEXEC,
&aarch32_vdso_spec[C_SIGPAGE]);
if (IS_ERR(ret))
goto out;
up_write(&mm->mmap_sem); mm->context.vdso = (void *)addr;
out:
return PTR_ERR_OR_ZERO(ret); return PTR_ERR_OR_ZERO(ret);
} }
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
int ret;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
ret = aarch32_kuser_helpers_setup(mm);
if (ret)
goto out;
ret = aarch32_sigreturn_setup(mm);
out:
up_write(&mm->mmap_sem);
return ret;
}
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
static int vdso_mremap(const struct vm_special_mapping *sm, static int vdso_mremap(const struct vm_special_mapping *sm,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment