Commit 7c1deeeb authored by Vincenzo Frascino, committed by Thomas Gleixner

arm64: compat: VDSO setup for compat layer

If CONFIG_GENERIC_COMPAT_VDSO is enabled, the compat vDSO is installed in a
compat (32-bit) process instead of the sigpage.

Add the necessary code to set up the pages the vDSO requires.
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Shijith Thotton <sthotton@marvell.com>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mips@vger.kernel.org
Cc: linux-kselftest@vger.kernel.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Mark Salyzyn <salyzyn@android.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Huw Davies <huw@codeweavers.com>
Link: https://lkml.kernel.org/r/20190621095252.32307-13-vincenzo.frascino@arm.com
parent c7aa2d71
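
The effect of this change is directly observable from a compat process: once the vDSO pages are mapped, a 32-bit task can locate them through the AT_SYSINFO_EHDR auxiliary vector entry (wired up for compat tasks elsewhere in this series). A minimal sketch of such a check, assuming an AArch32 toolchain; the test program below is illustrative and not part of this patch:

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* With a compat vDSO installed, the kernel passes its base address
	 * in the auxiliary vector; without one, the entry is absent. */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (!base) {
		puts("no vDSO mapped in this process");
		return 1;
	}

	/* The mapping begins with an ordinary ELF header; for a compat
	 * task it should report ELFCLASS32. */
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)base;
	printf("vDSO at %#lx, ELF class %u\n", base,
	       (unsigned int)ehdr->e_ident[EI_CLASS]);
	return 0;
}

Built with, for example, arm-linux-gnueabihf-gcc and run on an arm64 kernel with the option enabled, this should report ELF class 1 (ELFCLASS32).
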
@@ -29,12 +29,22 @@
 #include <asm/vdso.h>

 extern char vdso_start[], vdso_end[];
+#ifdef CONFIG_COMPAT_VDSO
+extern char vdso32_start[], vdso32_end[];
+#endif /* CONFIG_COMPAT_VDSO */

 /* vdso_lookup arch_index */
 enum arch_vdso_type {
         ARM64_VDSO = 0,
+#ifdef CONFIG_COMPAT_VDSO
+        ARM64_VDSO32 = 1,
+#endif /* CONFIG_COMPAT_VDSO */
 };
+#ifdef CONFIG_COMPAT_VDSO
+#define VDSO_TYPES (ARM64_VDSO32 + 1)
+#else
 #define VDSO_TYPES (ARM64_VDSO + 1)
+#endif /* CONFIG_COMPAT_VDSO */

 struct __vdso_abi {
         const char *name;
@@ -53,6 +63,13 @@ static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
                 .vdso_code_start = vdso_start,
                 .vdso_code_end = vdso_end,
         },
+#ifdef CONFIG_COMPAT_VDSO
+        {
+                .name = "vdso32",
+                .vdso_code_start = vdso32_start,
+                .vdso_code_end = vdso32_end,
+        },
+#endif /* CONFIG_COMPAT_VDSO */
 };

 /*
@@ -163,24 +180,52 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
+#ifdef CONFIG_COMPAT_VDSO
+static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
+                struct vm_area_struct *new_vma)
+{
+        return __vdso_remap(ARM64_VDSO32, sm, new_vma);
+}
+#endif /* CONFIG_COMPAT_VDSO */

 /*
  * aarch32_vdso_pages:
  * 0 - kuser helpers
  * 1 - sigreturn code
+ * or (CONFIG_COMPAT_VDSO):
+ * 0 - kuser helpers
+ * 1 - vdso data
+ * 2 - vdso code
  */
 #define C_VECTORS 0
+#ifdef CONFIG_COMPAT_VDSO
+#define C_VVAR 1
+#define C_VDSO 2
+#define C_PAGES (C_VDSO + 1)
+#else
 #define C_SIGPAGE 1
 #define C_PAGES (C_SIGPAGE + 1)
+#endif /* CONFIG_COMPAT_VDSO */
 static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
-static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
+static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
         {
                 .name = "[vectors]", /* ABI */
                 .pages = &aarch32_vdso_pages[C_VECTORS],
         },
+#ifdef CONFIG_COMPAT_VDSO
+        {
+                .name = "[vvar]",
+        },
+        {
+                .name = "[vdso]",
+                .mremap = aarch32_vdso_mremap,
+        },
+#else
         {
                 .name = "[sigpage]", /* ABI */
                 .pages = &aarch32_vdso_pages[C_SIGPAGE],
         },
+#endif /* CONFIG_COMPAT_VDSO */
 };

 static int aarch32_alloc_kuser_vdso_page(void)
@@ -203,7 +248,33 @@ static int aarch32_alloc_kuser_vdso_page(void)
         return 0;
 }

-static int __init aarch32_alloc_vdso_pages(void)
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+        int ret;
+
+        vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
+        vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
+
+        ret = __vdso_init(ARM64_VDSO32);
+        if (ret)
+                return ret;
+
+        ret = aarch32_alloc_kuser_vdso_page();
+        if (ret) {
+                unsigned long c_vvar =
+                        (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
+                unsigned long c_vdso =
+                        (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
+                free_page(c_vvar);
+                free_page(c_vdso);
+        }
+
+        return ret;
+}
+#else
+static int __aarch32_alloc_vdso_pages(void)
 {
         extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
         int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
@@ -224,6 +295,12 @@ static int __init aarch32_alloc_vdso_pages(void)

         return ret;
 }
+#endif /* CONFIG_COMPAT_VDSO */
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+        return __aarch32_alloc_vdso_pages();
+}
 arch_initcall(aarch32_alloc_vdso_pages);

 static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
@@ -245,6 +322,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
         return PTR_ERR_OR_ZERO(ret);
 }

+#ifndef CONFIG_COMPAT_VDSO
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
         unsigned long addr;
@@ -272,6 +350,7 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
 out:
         return PTR_ERR_OR_ZERO(ret);
 }
+#endif /* !CONFIG_COMPAT_VDSO */

 int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
@@ -285,7 +364,14 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
         if (ret)
                 goto out;

+#ifdef CONFIG_COMPAT_VDSO
+        ret = __setup_additional_pages(ARM64_VDSO32,
+                                       mm,
+                                       bprm,
+                                       uses_interp);
+#else
         ret = aarch32_sigreturn_setup(mm);
+#endif /* CONFIG_COMPAT_VDSO */
 out:
         up_write(&mm->mmap_sem);
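
A note on the payoff: with the [vvar] and [vdso] mappings installed, time queries from 32-bit tasks can be satisfied entirely in userspace instead of trapping into the kernel. A rough way to observe this, assuming a C library that resolves clock_gettime through the vDSO (running the 32-bit binary under strace should show the syscall count collapse once the vDSO path is taken); the program is a sketch, not part of this patch:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts = { 0 };

	/* When the compat vDSO services clock_gettime, this loop makes
	 * no syscalls; on a sigpage-only kernel each call traps. */
	for (int i = 0; i < 1000000; i++)
		clock_gettime(CLOCK_MONOTONIC, &ts);

	printf("last reading: %lld.%09ld\n",
	       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}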