Commit c8ba2065 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap 27 memset 0 vma

From: Hugh Dickins <hugh@veritas.com>

We're NULLifying more and more fields when initializing a vma
(mpol_set_vma_default does that too, if configured to do anything).  Now use
memset to avoid specifying fields, and save a little code too.

(Yes, I realize anon_vma will want to set vm_pgoff non-0, but I think that
will be better handled at the core, since anon vm_pgoff is negotiable up until
an anon_vma is actually assigned.)
parent ee7baa35
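
In outline, each converted call site stops clearing the unused fields one by one and instead zeroes the whole vma right after allocation, then sets only what it needs. A minimal sketch of the pattern (names taken from the hunks below; the surrounding function and error handling are elided):

        /* before: every unused field cleared by hand */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
                vma->vm_mm = current->mm;
                /* ... the fields this caller actually needs ... */
                vma->vm_ops = NULL;
                vma->vm_pgoff = 0;
                vma->vm_file = NULL;
                vma->vm_private_data = NULL;
                mpol_set_vma_default(vma);
        }

        /* after: one memset covers them all, including the default
         * (NULL) mempolicy that mpol_set_vma_default() used to set */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                /* ... the fields this caller actually needs ... */
        }
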
@@ -74,15 +74,13 @@ ia64_elf32_init (struct pt_regs *regs)
          */
         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (vma) {
+                memset(vma, 0, sizeof(*vma));
                 vma->vm_mm = current->mm;
                 vma->vm_start = IA32_GDT_OFFSET;
                 vma->vm_end = vma->vm_start + PAGE_SIZE;
                 vma->vm_page_prot = PAGE_SHARED;
                 vma->vm_flags = VM_READ|VM_MAYREAD;
                 vma->vm_ops = &ia32_shared_page_vm_ops;
-                vma->vm_pgoff = 0;
-                vma->vm_file = NULL;
-                vma->vm_private_data = NULL;
                 down_write(&current->mm->mmap_sem);
                 {
                         insert_vm_struct(current->mm, vma);
@@ -96,16 +94,12 @@ ia64_elf32_init (struct pt_regs *regs)
          */
         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (vma) {
+                memset(vma, 0, sizeof(*vma));
                 vma->vm_mm = current->mm;
                 vma->vm_start = IA32_LDT_OFFSET;
                 vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
                 vma->vm_page_prot = PAGE_SHARED;
                 vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
-                vma->vm_ops = NULL;
-                vma->vm_pgoff = 0;
-                vma->vm_file = NULL;
-                vma->vm_private_data = NULL;
-                mpol_set_vma_default(vma);
                 down_write(&current->mm->mmap_sem);
                 {
                         insert_vm_struct(current->mm, vma);
...
@@ -2309,6 +2309,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
                 DPRINT(("Cannot allocate vma\n"));
                 goto error_kmem;
         }
+        memset(vma, 0, sizeof(*vma));
+
         /*
          * partially initialize the vma for the sampling buffer
          *
@@ -2319,11 +2321,6 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
         vma->vm_mm = mm;
         vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
         vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
-        vma->vm_ops = NULL;
-        vma->vm_pgoff = 0;
-        vma->vm_file = NULL;
-        mpol_set_vma_default(vma);
-        vma->vm_private_data = NULL;

         /*
          * Now we have everything we need and we can initialize
...
@@ -124,16 +124,12 @@ ia64_init_addr_space (void)
          */
         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (vma) {
+                memset(vma, 0, sizeof(*vma));
                 vma->vm_mm = current->mm;
                 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                 vma->vm_end = vma->vm_start + PAGE_SIZE;
                 vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
                 vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
-                vma->vm_ops = NULL;
-                vma->vm_pgoff = 0;
-                vma->vm_file = NULL;
-                vma->vm_private_data = NULL;
-                mpol_set_vma_default(vma);
                 insert_vm_struct(current->mm, vma);
         }
@@ -146,7 +142,6 @@ ia64_init_addr_space (void)
                         vma->vm_end = PAGE_SIZE;
                         vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                         vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
-                        mpol_set_vma_default(vma);
                         insert_vm_struct(current->mm, vma);
                 }
         }
...
@@ -404,6 +404,8 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
                 return -ENOMEM;
         }

+        memset(mpnt, 0, sizeof(*mpnt));
+
         down_write(&mm->mmap_sem);
         {
                 mpnt->vm_mm = mm;
@@ -425,11 +427,6 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
                 else
                         mpnt->vm_flags = VM_STACK_FLAGS;
                 mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
-                mpnt->vm_ops = NULL;
-                mpnt->vm_pgoff = 0;
-                mpnt->vm_file = NULL;
-                mpol_set_vma_default(mpnt);
-                mpnt->vm_private_data = (void *) 0;
                 insert_vm_struct(mm, mpnt);
                 mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
         }
...
@@ -689,21 +689,18 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
          * not unmapped, but the maps are removed from the list.
          */
         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-        error = -ENOMEM;
-        if (!vma)
+        if (!vma) {
+                error = -ENOMEM;
                 goto unacct_error;
+        }
+        memset(vma, 0, sizeof(*vma));

         vma->vm_mm = mm;
         vma->vm_start = addr;
         vma->vm_end = addr + len;
         vma->vm_flags = vm_flags;
         vma->vm_page_prot = protection_map[vm_flags & 0x0f];
-        vma->vm_ops = NULL;
         vma->vm_pgoff = pgoff;
-        vma->vm_file = NULL;
-        vma->vm_private_data = NULL;
-        vma->vm_next = NULL;
-        mpol_set_vma_default(vma);

         if (file) {
                 error = -EINVAL;
@@ -1447,17 +1444,13 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                 vm_unacct_memory(len >> PAGE_SHIFT);
                 return -ENOMEM;
         }
+        memset(vma, 0, sizeof(*vma));

         vma->vm_mm = mm;
         vma->vm_start = addr;
         vma->vm_end = addr + len;
         vma->vm_flags = flags;
         vma->vm_page_prot = protection_map[flags & 0x0f];
-        vma->vm_ops = NULL;
-        vma->vm_pgoff = 0;
-        vma->vm_file = NULL;
-        vma->vm_private_data = NULL;
-        mpol_set_vma_default(vma);
         vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
         mm->total_vm += len >> PAGE_SHIFT;
...