Commit b6a2fea3 authored by Ollie Wild, committed by Linus Torvalds

mm: variable length argument support

Remove the arg+env limit of MAX_ARG_PAGES by copying the strings directly from
the old mm into the new mm.

We create the new mm before the binfmt code runs, and place the new stack at
the very top of the address space.  Once the binfmt code runs and figures out
where the stack should be, we move it downwards.

It is a bit peculiar in that we have one task with two mm's, one of which is
inactive.

[a.p.zijlstra@chello.nl: limit stack size]
Signed-off-by: Ollie Wild <aaw@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: <linux-arch@vger.kernel.org>
Cc: Hugh Dickins <hugh@veritas.com>
[bunk@stusta.de: unexport bprm_mm_init]
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bdf4c48a
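
The core fs/exec.c changes (bprm_mm_init(), the rewritten copy_strings() and
setup_arg_pages()) are not shown in the hunks below. As a rough sketch of the
flow the message above describes -- condensed and illustrative only, with
locking and most error handling dropped; STACK_TOP_MAX stands in for the
architecture's highest usable stack address and is an assumption of this
sketch:

    /* Sketch: how the new execve() argument copying is set up. */
    static int bprm_mm_init(struct linux_binprm *bprm)
    {
        struct vm_area_struct *vma;

        bprm->mm = mm_alloc();                  /* second, not-yet-active mm */
        if (!bprm->mm)
            return -ENOMEM;

        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
            return -ENOMEM;

        /* One stack VMA placed at the very top of the new address space. */
        vma->vm_mm = bprm->mm;
        vma->vm_end = STACK_TOP_MAX;            /* assumed arch maximum */
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_STACK_FLAGS;
        if (insert_vm_struct(bprm->mm, vma))
            return -ENOMEM;

        bprm->vma = vma;
        bprm->p = vma->vm_end - sizeof(void *); /* current top of the args */
        return 0;
    }

    /*
     * copy_strings() then writes argv/envp straight into bprm->mm by
     * faulting pages in with get_user_pages(current, bprm->mm, ...) and
     * growing the stack VMA downwards as needed; compat_copy_strings() in
     * the fs/compat.c hunk below follows the same pattern.  Once the binfmt
     * handler knows where the stack really belongs, setup_arg_pages() moves
     * the whole VMA down with move_page_tables()/mprotect_fixup() and
     * enforces the stack rlimit.
     */
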
@@ -195,62 +195,27 @@ ia64_elf32_init (struct pt_regs *regs)
 	ia32_load_state(current);
 }
 
+/*
+ * Undo the override of setup_arg_pages() without this ia32_setup_arg_pages()
+ * will suffer infinite self recursion.
+ */
+#undef setup_arg_pages
+
 int
 ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 {
-	unsigned long stack_base;
-	struct vm_area_struct *mpnt;
-	struct mm_struct *mm = current->mm;
-	int i, ret;
-
-	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
-	mm->arg_start = bprm->p + stack_base;
-
-	bprm->p += stack_base;
-	if (bprm->loader)
-		bprm->loader += stack_base;
-	bprm->exec += stack_base;
-
-	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!mpnt)
-		return -ENOMEM;
-
-	down_write(&current->mm->mmap_sem);
-	{
-		mpnt->vm_mm = current->mm;
-		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
-		mpnt->vm_end = IA32_STACK_TOP;
-		if (executable_stack == EXSTACK_ENABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
-		else if (executable_stack == EXSTACK_DISABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
-		else
-			mpnt->vm_flags = VM_STACK_FLAGS;
-		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
-					PAGE_COPY_EXEC: PAGE_COPY;
-		if ((ret = insert_vm_struct(current->mm, mpnt))) {
-			up_write(&current->mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, mpnt);
-			return ret;
-		}
-		current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
-	}
-
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page *page = bprm->page[i];
-		if (page) {
-			bprm->page[i] = NULL;
-			install_arg_page(mpnt, page, stack_base);
-		}
-		stack_base += PAGE_SIZE;
-	}
-	up_write(&current->mm->mmap_sem);
-
-	/* Can't do it in ia64_elf32_init(). Needs to be done before calls to
-	   elf32_map() */
-	current->thread.ppl = ia32_init_pp_list();
-
-	return 0;
+	int ret;
+
+	ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack);
+	if (!ret) {
+		/*
+		 * Can't do it in ia64_elf32_init(). Needs to be done before
+		 * calls to elf32_map()
+		 */
+		current->thread.ppl = ia32_init_pp_list();
+	}
+
+	return ret;
 }
 
 static void
...
@@ -404,7 +404,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	set_brk(current->mm->start_brk, current->mm->brk);
 
-	retval = ia32_setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
 	if (retval < 0) {
 		/* Someone check-me: is this error path enough? */
 		send_sig(SIGKILL, current, 0);
...
@@ -232,9 +232,6 @@ do { \
 #define load_elf_binary load_elf32_binary
 #define ELF_PLAT_INIT(r, load_addr) elf32_init(r)
 
-#define setup_arg_pages(bprm, stack_top, exec_stack) \
-	ia32_setup_arg_pages(bprm, stack_top, exec_stack)
-int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int executable_stack);
-
 #undef start_thread
 #define start_thread(regs,new_rip,new_rsp) do { \
@@ -286,61 +283,6 @@ static void elf32_init(struct pt_regs *regs)
 	me->thread.es = __USER_DS;
 }
 
-int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
-			 int executable_stack)
-{
-	unsigned long stack_base;
-	struct vm_area_struct *mpnt;
-	struct mm_struct *mm = current->mm;
-	int i, ret;
-
-	stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
-	mm->arg_start = bprm->p + stack_base;
-
-	bprm->p += stack_base;
-	if (bprm->loader)
-		bprm->loader += stack_base;
-	bprm->exec += stack_base;
-
-	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!mpnt)
-		return -ENOMEM;
-
-	down_write(&mm->mmap_sem);
-	{
-		mpnt->vm_mm = mm;
-		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
-		mpnt->vm_end = stack_top;
-		if (executable_stack == EXSTACK_ENABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
-		else if (executable_stack == EXSTACK_DISABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
-		else
-			mpnt->vm_flags = VM_STACK_FLAGS;
-		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
-			PAGE_COPY_EXEC : PAGE_COPY;
-		if ((ret = insert_vm_struct(mm, mpnt))) {
-			up_write(&mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, mpnt);
-			return ret;
-		}
-		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
-	}
-
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page *page = bprm->page[i];
-		if (page) {
-			bprm->page[i] = NULL;
-			install_arg_page(mpnt, page, stack_base);
-		}
-		stack_base += PAGE_SIZE;
-	}
-	up_write(&mm->mmap_sem);
-
-	return 0;
-}
-EXPORT_SYMBOL(ia32_setup_arg_pages);
-
 #ifdef CONFIG_SYSCTL
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
...
@@ -148,6 +148,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	elf_addr_t *elf_info;
 	int ei_index = 0;
 	struct task_struct *tsk = current;
+	struct vm_area_struct *vma;
 
 	/*
 	 * If this architecture has a platform capability string, copy it
@@ -234,6 +235,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	sp = (elf_addr_t __user *)bprm->p;
 #endif
 
+	/*
+	 * Grow the stack manually; some architectures have a limit on how
+	 * far ahead a user-space access may be in order to grow the stack.
+	 */
+	vma = find_extend_vma(current->mm, bprm->p);
+	if (!vma)
+		return -EFAULT;
+
 	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
 	if (__put_user(argc, sp++))
 		return -EFAULT;
@@ -254,8 +264,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 		size_t len;
 		if (__put_user((elf_addr_t)p, argv++))
 			return -EFAULT;
-		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return 0;
 		p += len;
 	}
@@ -266,8 +276,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 		size_t len;
 		if (__put_user((elf_addr_t)p, envp++))
 			return -EFAULT;
-		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return 0;
 		p += len;
 	}
@@ -826,10 +836,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	}
 
 	/* OK, This is the point of no return */
-	current->mm->start_data = 0;
-	current->mm->end_data = 0;
-	current->mm->end_code = 0;
-	current->mm->mmap = NULL;
 	current->flags &= ~PF_FORKNOEXEC;
 	current->mm->def_flags = def_flags;
@@ -1051,9 +1057,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
-	create_elf_tables(bprm, &loc->elf_ex,
+	retval = create_elf_tables(bprm, &loc->elf_ex,
 			(interpreter_type == INTERPRETER_AOUT),
 			load_addr, interp_load_addr);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		goto out;
+	}
 
 	/* N.B. passed_fileno might not be initialized? */
 	if (interpreter_type == INTERPRETER_AOUT)
 		current->mm->arg_start += strlen(passed_fileno) + 1;
...
@@ -621,8 +621,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 	p = (char __user *) current->mm->arg_start;
 	for (loop = bprm->argc; loop > 0; loop--) {
 		__put_user((elf_caddr_t) p, argv++);
-		len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+		len = strnlen_user(p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return -EINVAL;
 		p += len;
 	}
@@ -633,8 +633,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 	current->mm->env_start = (unsigned long) p;
 	for (loop = bprm->envc; loop > 0; loop--) {
 		__put_user((elf_caddr_t)(unsigned long) p, envp++);
-		len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+		len = strnlen_user(p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return -EINVAL;
 		p += len;
 	}
...
@@ -126,7 +126,9 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		goto _ret;
 
 	if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
-		remove_arg_zero(bprm);
+		retval = remove_arg_zero(bprm);
+		if (retval)
+			goto _ret;
 	}
 
 	if (fmt->flags & MISC_FMT_OPEN_BINARY) {
...
@@ -67,7 +67,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
 	 * This is done in reverse order, because of how the
 	 * user environment and arguments are stored.
 	 */
-	remove_arg_zero(bprm);
+	retval = remove_arg_zero(bprm);
+	if (retval)
+		return retval;
 	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 	if (retval < 0) return retval;
 	bprm->argc++;
...
@@ -1257,6 +1257,7 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
 {
 	struct page *kmapped_page = NULL;
 	char *kaddr = NULL;
+	unsigned long kpos = 0;
 	int ret;
 
 	while (argc-- > 0) {
@@ -1265,92 +1266,84 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
 		unsigned long pos;
 
 		if (get_user(str, argv+argc) ||
-		    !(len = strnlen_user(compat_ptr(str), bprm->p))) {
+		    !(len = strnlen_user(compat_ptr(str), MAX_ARG_STRLEN))) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		if (bprm->p < len) {
+		if (len > MAX_ARG_STRLEN) {
 			ret = -E2BIG;
 			goto out;
 		}
 
-		bprm->p -= len;
-		/* XXX: add architecture specific overflow check here. */
+		/* We're going to work our way backwords. */
 		pos = bprm->p;
+		str += len;
+		bprm->p -= len;
 
 		while (len > 0) {
-			int i, new, err;
 			int offset, bytes_to_copy;
-			struct page *page;
 
 			offset = pos % PAGE_SIZE;
-			i = pos/PAGE_SIZE;
-			page = bprm->page[i];
-			new = 0;
-			if (!page) {
-				page = alloc_page(GFP_HIGHUSER);
-				bprm->page[i] = page;
-				if (!page) {
-					ret = -ENOMEM;
+			if (offset == 0)
+				offset = PAGE_SIZE;
+
+			bytes_to_copy = offset;
+			if (bytes_to_copy > len)
+				bytes_to_copy = len;
+
+			offset -= bytes_to_copy;
+			pos -= bytes_to_copy;
+			str -= bytes_to_copy;
+			len -= bytes_to_copy;
+
+			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+				struct page *page;
+
+#ifdef CONFIG_STACK_GROWSUP
+				ret = expand_stack_downwards(bprm->vma, pos);
+				if (ret < 0) {
+					/* We've exceed the stack rlimit. */
+					ret = -E2BIG;
+					goto out;
+				}
+#endif
+				ret = get_user_pages(current, bprm->mm, pos,
+						1, 1, 1, &page, NULL);
+				if (ret <= 0) {
+					/* We've exceed the stack rlimit. */
+					ret = -E2BIG;
 					goto out;
 				}
-				new = 1;
-			}
 
-			if (page != kmapped_page) {
-				if (kmapped_page)
-					kunmap(kmapped_page);
+				if (kmapped_page) {
+					flush_kernel_dcache_page(kmapped_page);
+					kunmap(kmapped_page);
+					put_page(kmapped_page);
+				}
 				kmapped_page = page;
 				kaddr = kmap(kmapped_page);
+				kpos = pos & PAGE_MASK;
+				flush_cache_page(bprm->vma, kpos,
+						page_to_pfn(kmapped_page));
 			}
-			if (new && offset)
-				memset(kaddr, 0, offset);
-			bytes_to_copy = PAGE_SIZE - offset;
-			if (bytes_to_copy > len) {
-				bytes_to_copy = len;
-				if (new)
-					memset(kaddr+offset+len, 0,
-						PAGE_SIZE-offset-len);
-			}
-			err = copy_from_user(kaddr+offset, compat_ptr(str),
-						bytes_to_copy);
-			if (err) {
+			if (copy_from_user(kaddr+offset, compat_ptr(str),
+						bytes_to_copy)) {
 				ret = -EFAULT;
 				goto out;
 			}
-
-			pos += bytes_to_copy;
-			str += bytes_to_copy;
-			len -= bytes_to_copy;
 		}
 	}
 	ret = 0;
 out:
-	if (kmapped_page)
+	if (kmapped_page) {
+		flush_kernel_dcache_page(kmapped_page);
 		kunmap(kmapped_page);
+		put_page(kmapped_page);
+	}
 	return ret;
 }
 
-#ifdef CONFIG_MMU
-#define free_arg_pages(bprm) do { } while (0)
-#else
-static inline void free_arg_pages(struct linux_binprm *bprm)
-{
-	int i;
-
-	for (i = 0; i < MAX_ARG_PAGES; i++) {
-		if (bprm->page[i])
-			__free_page(bprm->page[i]);
-		bprm->page[i] = NULL;
-	}
-}
-#endif /* CONFIG_MMU */
-
 /*
  * compat_do_execve() is mostly a copy of do_execve(), with the exception
  * that it processes 32 bit argv and envp pointers.
@@ -1363,7 +1356,6 @@ int compat_do_execve(char * filename,
 	struct linux_binprm *bprm;
 	struct file *file;
 	int retval;
-	int i;
 
 	retval = -ENOMEM;
 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
@@ -1377,24 +1369,19 @@ int compat_do_execve(char * filename,
 	sched_exec();
 
-	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
 	bprm->file = file;
 	bprm->filename = filename;
 	bprm->interp = filename;
-	bprm->mm = mm_alloc();
-	retval = -ENOMEM;
-	if (!bprm->mm)
-		goto out_file;
 
-	retval = init_new_context(current, bprm->mm);
-	if (retval < 0)
-		goto out_mm;
+	retval = bprm_mm_init(bprm);
+	if (retval)
+		goto out_file;
 
-	bprm->argc = compat_count(argv, bprm->p / sizeof(compat_uptr_t));
+	bprm->argc = compat_count(argv, MAX_ARG_STRINGS);
 	if ((retval = bprm->argc) < 0)
 		goto out_mm;
 
-	bprm->envc = compat_count(envp, bprm->p / sizeof(compat_uptr_t));
+	bprm->envc = compat_count(envp, MAX_ARG_STRINGS);
 	if ((retval = bprm->envc) < 0)
 		goto out_mm;
@@ -1421,8 +1408,6 @@ int compat_do_execve(char * filename,
 	retval = search_binary_handler(bprm, regs);
 	if (retval >= 0) {
-		free_arg_pages(bprm);
-
 		/* execve success */
 		security_bprm_free(bprm);
 		acct_update_integrals(current);
@@ -1431,19 +1416,12 @@ int compat_do_execve(char * filename,
 	}
 
 out:
-	/* Something went wrong, return the inode and free the argument pages*/
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page * page = bprm->page[i];
-		if (page)
-			__free_page(page);
-	}
-
 	if (bprm->security)
 		security_bprm_free(bprm);
 
out_mm:
 	if (bprm->mm)
-		mmdrop(bprm->mm);
+		mmput(bprm->mm);
 
out_file:
 	if (bprm->file) {
...
@@ -6,11 +6,13 @@
 struct pt_regs;
 
 /*
- * MAX_ARG_PAGES defines the number of pages allocated for arguments
- * and envelope for the new program. 32 should suffice, this gives
- * a maximum env+arg of 128kB w/4KB pages!
+ * These are the maximum length and maximum number of strings passed to the
+ * execve() system call.  MAX_ARG_STRLEN is essentially random but serves to
+ * prevent the kernel from being unduly impacted by misaddressed pointers.
+ * MAX_ARG_STRINGS is chosen to fit in a signed 32-bit integer.
  */
-#define MAX_ARG_PAGES 32
+#define MAX_ARG_STRLEN (PAGE_SIZE * 32)
+#define MAX_ARG_STRINGS 0x7FFFFFFF
 
 /* sizeof(linux_binprm->buf) */
 #define BINPRM_BUF_SIZE 128
@@ -24,7 +26,12 @@ struct pt_regs;
  */
 struct linux_binprm{
 	char buf[BINPRM_BUF_SIZE];
+#ifdef CONFIG_MMU
+	struct vm_area_struct *vma;
+#else
+# define MAX_ARG_PAGES	32
 	struct page *page[MAX_ARG_PAGES];
+#endif
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
 	int sh_bang;
@@ -69,7 +76,7 @@ extern int register_binfmt(struct linux_binfmt *);
 extern int unregister_binfmt(struct linux_binfmt *);
 extern int prepare_binprm(struct linux_binprm *);
-extern void remove_arg_zero(struct linux_binprm *);
+extern int __must_check remove_arg_zero(struct linux_binprm *);
 extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
 extern int flush_old_exec(struct linux_binprm * bprm);
@@ -86,6 +93,7 @@ extern int suid_dumpable;
 extern int setup_arg_pages(struct linux_binprm * bprm,
 			   unsigned long stack_top,
 			   int executable_stack);
+extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
 extern void compute_creds(struct linux_binprm *binprm);
 extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
...
@@ -808,7 +808,6 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
@@ -825,9 +824,15 @@ int FASTCALL(set_page_dirty(struct page *page));
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+		unsigned long old_addr, struct vm_area_struct *new_vma,
+		unsigned long new_addr, unsigned long len);
 extern unsigned long do_mremap(unsigned long addr,
 			       unsigned long old_len, unsigned long new_len,
 			       unsigned long flags, unsigned long new_addr);
+extern int mprotect_fixup(struct vm_area_struct *vma,
+			  struct vm_area_struct **pprev, unsigned long start,
+			  unsigned long end, unsigned long newflags);
 
 /*
  * A callback you can register to apply pressure to ageable caches.
@@ -1159,6 +1164,8 @@ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 #ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #endif
+extern int expand_stack_downwards(struct vm_area_struct *vma,
+				  unsigned long address);
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
...
@@ -843,7 +843,7 @@ static void audit_log_execve_info(struct audit_buffer *ab,
 		return; /* execve failed, no additional info */
 
 	for (i = 0; i < axi->argc; i++, p += len) {
-		len = strnlen_user(p, MAX_ARG_PAGES*PAGE_SIZE);
+		len = strnlen_user(p, MAX_ARG_STRLEN);
 		/*
 		 * We just created this mm, if we can't find the strings
 		 * we just copied into it something is _very_ wrong. Similar
...
@@ -1571,33 +1571,11 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
 
-#ifdef CONFIG_STACK_GROWSUP
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-	return expand_upwards(vma, address);
-}
-
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-	struct vm_area_struct *vma, *prev;
-
-	addr &= PAGE_MASK;
-	vma = find_vma_prev(mm, addr, &prev);
-	if (vma && (vma->vm_start <= addr))
-		return vma;
-	if (!prev || expand_stack(prev, addr))
-		return NULL;
-	if (prev->vm_flags & VM_LOCKED) {
-		make_pages_present(addr, prev->vm_end);
-	}
-	return prev;
-}
-#else
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+static inline int expand_downwards(struct vm_area_struct *vma,
+				   unsigned long address)
 {
 	int error;
@@ -1634,6 +1612,38 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
 	return error;
 }
 
+int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_downwards(vma, address);
+}
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_upwards(vma, address);
+}
+
+struct vm_area_struct *
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma, *prev;
+
+	addr &= PAGE_MASK;
+	vma = find_vma_prev(mm, addr, &prev);
+	if (vma && (vma->vm_start <= addr))
+		return vma;
+	if (!prev || expand_stack(prev, addr))
+		return NULL;
+	if (prev->vm_flags & VM_LOCKED)
+		make_pages_present(addr, prev->vm_end);
+	return prev;
+}
+#else
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_downwards(vma, address);
+}
+
 struct vm_area_struct *
 find_extend_vma(struct mm_struct * mm, unsigned long addr)
 {
@@ -1651,9 +1661,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
-	if (vma->vm_flags & VM_LOCKED) {
+	if (vma->vm_flags & VM_LOCKED)
 		make_pages_present(addr, start);
-	}
 	return vma;
 }
 #endif
...
@@ -128,7 +128,7 @@ static void change_protection(struct vm_area_struct *vma,
 	flush_tlb_range(vma, start, end);
 }
 
-static int
+int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
 {
...
@@ -120,7 +120,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
 
-static unsigned long move_page_tables(struct vm_area_struct *vma,
+unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len)
 {
...