Commit 871402e0 authored by Dmitry Safonov, committed by Linus Torvalds

mm: forbid splitting special mappings

Don't allow splitting of vm_special_mapping's.  It affects vdso/vvar
areas.  Uprobes have only one page in xol_area so they aren't affected.

Those restrictions were enforced by checks in .mremap() callbacks.
Restrict resizing with generic .split() callback.

Link: https://lkml.kernel.org/r/20201013013416.390574-7-dima@arista.com
Signed-off-by: Dmitry Safonov <dima@arista.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 73d5e062
...@@ -50,15 +50,6 @@ static const struct vm_special_mapping vdso_data_mapping = { ...@@ -50,15 +50,6 @@ static const struct vm_special_mapping vdso_data_mapping = {
static int vdso_mremap(const struct vm_special_mapping *sm, static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma) struct vm_area_struct *new_vma)
{ {
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
unsigned long vdso_size;
/* without VVAR page */
vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
if (vdso_size != new_size)
return -EINVAL;
current->mm->context.vdso = new_vma->vm_start; current->mm->context.vdso = new_vma->vm_start;
return 0; return 0;
......
...@@ -78,17 +78,9 @@ static union { ...@@ -78,17 +78,9 @@ static union {
} vdso_data_store __page_aligned_data; } vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data; struct vdso_data *vdso_data = vdso_data_store.data;
static int __vdso_remap(enum vdso_abi abi, static int vdso_mremap(const struct vm_special_mapping *sm,
const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma) struct vm_area_struct *new_vma)
{ {
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
unsigned long vdso_size = vdso_info[abi].vdso_code_end -
vdso_info[abi].vdso_code_start;
if (vdso_size != new_size)
return -EINVAL;
current->mm->context.vdso = (void *)new_vma->vm_start; current->mm->context.vdso = (void *)new_vma->vm_start;
return 0; return 0;
...@@ -219,17 +211,6 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, ...@@ -219,17 +211,6 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
return vmf_insert_pfn(vma, vmf->address, pfn); return vmf_insert_pfn(vma, vmf->address, pfn);
} }
/*
 * arm64 [vvar] .mremap() callback, removed by this commit.
 * It only rejected mremap() calls that would resize the vvar area
 * away from its fixed VVAR_NR_PAGES size; the generic .may_split()
 * handler added in mm/mmap.c now forbids resizing special mappings,
 * making this per-arch size check redundant.
 */
static int vvar_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
/* Only an exact-size move is allowed; any grow/shrink fails. */
if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
return -EINVAL;
return 0;
}
static int __setup_additional_pages(enum vdso_abi abi, static int __setup_additional_pages(enum vdso_abi abi,
struct mm_struct *mm, struct mm_struct *mm,
struct linux_binprm *bprm, struct linux_binprm *bprm,
...@@ -280,12 +261,6 @@ static int __setup_additional_pages(enum vdso_abi abi, ...@@ -280,12 +261,6 @@ static int __setup_additional_pages(enum vdso_abi abi,
/* /*
* Create and map the vectors page for AArch32 tasks. * Create and map the vectors page for AArch32 tasks.
*/ */
/*
 * Thin compat (AArch32) wrapper, removed by this commit: it only
 * dispatched to __vdso_remap() with the VDSO_ABI_AA32 ABI selector.
 * With the size check gone, both ABIs share a single vdso_mremap().
 */
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
enum aarch32_map { enum aarch32_map {
AA32_MAP_VECTORS, /* kuser helpers */ AA32_MAP_VECTORS, /* kuser helpers */
AA32_MAP_SIGPAGE, AA32_MAP_SIGPAGE,
...@@ -308,11 +283,10 @@ static struct vm_special_mapping aarch32_vdso_maps[] = { ...@@ -308,11 +283,10 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VVAR] = { [AA32_MAP_VVAR] = {
.name = "[vvar]", .name = "[vvar]",
.fault = vvar_fault, .fault = vvar_fault,
.mremap = vvar_mremap,
}, },
[AA32_MAP_VDSO] = { [AA32_MAP_VDSO] = {
.name = "[vdso]", .name = "[vdso]",
.mremap = aarch32_vdso_mremap, .mremap = vdso_mremap,
}, },
}; };
...@@ -453,12 +427,6 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -453,12 +427,6 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
} }
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
/*
 * Native (AArch64) wrapper, removed by this commit: it only dispatched
 * to __vdso_remap() with the VDSO_ABI_AA64 ABI selector.  After the
 * size check was dropped the shared __vdso_remap() body no longer
 * depends on the ABI, so it is renamed to vdso_mremap() directly.
 */
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}
enum aarch64_map { enum aarch64_map {
AA64_MAP_VVAR, AA64_MAP_VVAR,
AA64_MAP_VDSO, AA64_MAP_VDSO,
...@@ -468,7 +436,6 @@ static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = { ...@@ -468,7 +436,6 @@ static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
[AA64_MAP_VVAR] = { [AA64_MAP_VVAR] = {
.name = "[vvar]", .name = "[vvar]",
.fault = vvar_fault, .fault = vvar_fault,
.mremap = vvar_mremap,
}, },
[AA64_MAP_VDSO] = { [AA64_MAP_VDSO] = {
.name = "[vdso]", .name = "[vdso]",
......
...@@ -263,10 +263,6 @@ int main(int argc, char **argv) ...@@ -263,10 +263,6 @@ int main(int argc, char **argv)
fprintf(out_file, " const struct vm_special_mapping *sm,\n"); fprintf(out_file, " const struct vm_special_mapping *sm,\n");
fprintf(out_file, " struct vm_area_struct *new_vma)\n"); fprintf(out_file, " struct vm_area_struct *new_vma)\n");
fprintf(out_file, "{\n"); fprintf(out_file, "{\n");
fprintf(out_file, " unsigned long new_size =\n");
fprintf(out_file, " new_vma->vm_end - new_vma->vm_start;\n");
fprintf(out_file, " if (vdso_image.size != new_size)\n");
fprintf(out_file, " return -EINVAL;\n");
fprintf(out_file, " current->mm->context.vdso =\n"); fprintf(out_file, " current->mm->context.vdso =\n");
fprintf(out_file, " (void *)(new_vma->vm_start);\n"); fprintf(out_file, " (void *)(new_vma->vm_start);\n");
fprintf(out_file, " return 0;\n"); fprintf(out_file, " return 0;\n");
......
...@@ -61,17 +61,8 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, ...@@ -61,17 +61,8 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
static int vdso_mremap(const struct vm_special_mapping *sm, static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma) struct vm_area_struct *vma)
{ {
unsigned long vdso_pages;
vdso_pages = vdso64_pages;
if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
return -EINVAL;
if (WARN_ON_ONCE(current->mm != vma->vm_mm))
return -EFAULT;
current->mm->context.vdso_base = vma->vm_start; current->mm->context.vdso_base = vma->vm_start;
return 0; return 0;
} }
......
...@@ -89,30 +89,14 @@ static void vdso_fix_landing(const struct vdso_image *image, ...@@ -89,30 +89,14 @@ static void vdso_fix_landing(const struct vdso_image *image,
static int vdso_mremap(const struct vm_special_mapping *sm, static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma) struct vm_area_struct *new_vma)
{ {
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
const struct vdso_image *image = current->mm->context.vdso_image; const struct vdso_image *image = current->mm->context.vdso_image;
if (image->size != new_size)
return -EINVAL;
vdso_fix_landing(image, new_vma); vdso_fix_landing(image, new_vma);
current->mm->context.vdso = (void __user *)new_vma->vm_start; current->mm->context.vdso = (void __user *)new_vma->vm_start;
return 0; return 0;
} }
/*
 * x86 [vvar] .mremap() callback, removed by this commit.  It rejected
 * any mremap() whose new size differed from the vvar area size;
 * that invariant is now enforced generically by special_mapping_split()
 * in mm/mmap.c (splitting forbidden) plus VM_DONTEXPAND (growth
 * forbidden), so the per-arch check is redundant.
 */
static int vvar_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
/*
 * sym_vvar_start appears to be a negative offset from the vdso start,
 * so its negation is the vvar area size — NOTE(review): confirm
 * against the vdso image layout definition.
 */
if (new_size != -image->sym_vvar_start)
return -EINVAL;
return 0;
}
#ifdef CONFIG_TIME_NS #ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma) static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{ {
...@@ -252,7 +236,6 @@ static const struct vm_special_mapping vdso_mapping = { ...@@ -252,7 +236,6 @@ static const struct vm_special_mapping vdso_mapping = {
static const struct vm_special_mapping vvar_mapping = { static const struct vm_special_mapping vvar_mapping = {
.name = "[vvar]", .name = "[vvar]",
.fault = vvar_fault, .fault = vvar_fault,
.mremap = vvar_mremap,
}; };
/* /*
......
...@@ -3422,6 +3422,17 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma, ...@@ -3422,6 +3422,17 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma,
return 0; return 0;
} }
/*
 * Generic .may_split() handler installed for all vm_special_mapping
 * VMAs (see special_mapping_vmops below).  Unconditionally returning
 * -EINVAL rejects any operation that would split the VMA at @addr,
 * replacing the per-arch size checks that the individual .mremap()
 * callbacks used to perform.
 */
static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
/*
 * Forbid splitting special mappings - kernel has expectations over
 * the number of pages in mapping. Together with VM_DONTEXPAND
 * the size of vma should stay the same over the special mapping's
 * lifetime.
 */
return -EINVAL;
}
static const struct vm_operations_struct special_mapping_vmops = { static const struct vm_operations_struct special_mapping_vmops = {
.close = special_mapping_close, .close = special_mapping_close,
.fault = special_mapping_fault, .fault = special_mapping_fault,
...@@ -3429,6 +3440,7 @@ static const struct vm_operations_struct special_mapping_vmops = { ...@@ -3429,6 +3440,7 @@ static const struct vm_operations_struct special_mapping_vmops = {
.name = special_mapping_name, .name = special_mapping_name,
/* vDSO code relies that VVAR can't be accessed remotely */ /* vDSO code relies that VVAR can't be accessed remotely */
.access = NULL, .access = NULL,
.may_split = special_mapping_split,
}; };
static const struct vm_operations_struct legacy_special_mapping_vmops = { static const struct vm_operations_struct legacy_special_mapping_vmops = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment