Commit 1255a734 authored by Vincenzo Frascino's avatar Vincenzo Frascino Committed by Will Deacon

arm64: compat: Refactor aarch32_alloc_vdso_pages()

aarch32_alloc_vdso_pages() needs to be refactored to make it
easier to disable kuser helpers.

Divide the function in aarch32_alloc_kuser_vdso_page() and
aarch32_alloc_sigreturn_vdso_page().

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
[will: Inlined sigpage allocation to simplify error paths]
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent d1e5ca64
...@@ -68,43 +68,43 @@ static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = { ...@@ -68,43 +68,43 @@ static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
}, },
}; };
static int __init aarch32_alloc_vdso_pages(void) static int aarch32_alloc_kuser_vdso_page(void)
{ {
extern char __kuser_helper_start[], __kuser_helper_end[]; extern char __kuser_helper_start[], __kuser_helper_end[];
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start; int kuser_sz = __kuser_helper_end - __kuser_helper_start;
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; unsigned long vdso_page;
unsigned long vdso_pages[C_PAGES];
vdso_pages[C_VECTORS] = get_zeroed_page(GFP_ATOMIC);
if (!vdso_pages[C_VECTORS])
return -ENOMEM;
vdso_pages[C_SIGPAGE] = get_zeroed_page(GFP_ATOMIC); vdso_page = get_zeroed_page(GFP_ATOMIC);
if (!vdso_pages[C_SIGPAGE]) { if (!vdso_page)
free_page(vdso_pages[C_VECTORS]);
return -ENOMEM; return -ENOMEM;
}
/* kuser helpers */ memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
memcpy((void *)(vdso_pages[C_VECTORS] + 0x1000 - kuser_sz),
__kuser_helper_start,
kuser_sz); kuser_sz);
aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
return 0;
}
/*
 * One-time (arch_initcall) allocation of the compat AArch32 vDSO pages.
 *
 * The sigreturn trampoline page is set up inline: a zeroed page is
 * allocated, the sigreturn code copied to its start, the struct page
 * published in aarch32_vdso_pages[C_SIGPAGE] and its data cache flushed.
 * The kuser helper page is then delegated to
 * aarch32_alloc_kuser_vdso_page(); if that fails, the sigreturn page is
 * freed again so no memory is leaked on the error path.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int __init aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigret_vpage;
	int err;

	sigret_vpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigret_vpage)
		return -ENOMEM;

	/* The sigreturn trampoline lives at the very start of its page. */
	memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigret_vpage);
	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

	err = aarch32_alloc_kuser_vdso_page();
	if (err)
		free_page(sigret_vpage);

	return err;
}
arch_initcall(aarch32_alloc_vdso_pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment