Commit b81a618d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6:
  deal with races in /proc/*/{syscall,stack,personality}
  proc: enable writing to /proc/pid/mem
  proc: make check_mem_permission() return an mm_struct on success
  proc: hold cred_guard_mutex in check_mem_permission()
  proc: disable mem_write after exec
  mm: implement access_remote_vm
  mm: factor out main logic of access_process_vm
  mm: use mm_struct to resolve gate vma's in __get_user_pages
  mm: arch: rename in_gate_area_no_task to in_gate_area_no_mm
  mm: arch: make in_gate_area take an mm_struct instead of a task_struct
  mm: arch: make get_gate_vma take an mm_struct instead of a task_struct
  x86: mark associated mm when running a task in 32 bit compatibility mode
  x86: add context tag to mark mm when running a task in 32-bit compatibility mode
  auxv: require the target to be traceable (or yourself)
  close race in /proc/*/environ
  report errors in /proc/*/*map* sanely
  pagemap: close races with suid execve
  make sessionid permissions in /proc/*/task/* match those in /proc/*
  fix leaks in path_lookupat()

Fix up trivial conflicts in fs/proc/base.c
parents 2f284c84 a9712bc1
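
At a glance, the series pivots two kernel interfaces from task_struct to mm_struct: the gate-VMA helpers (get_gate_vma(), in_gate_area(), and in_gate_area_no_task(), renamed to in_gate_area_no_mm()) and the core of access_process_vm(), factored out as __access_remote_vm(). A minimal sketch of the resulting calling convention follows; task_addr_in_gate() is a hypothetical caller, not code from this merge.

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical caller showing the migration to the mm-based interface. */
static int task_addr_in_gate(struct task_struct *tsk, unsigned long addr)
{
	/*
	 * Before this series: in_gate_area(tsk, addr).
	 * After: resolve the mm once and pass it around, so the answer
	 * stays tied to one mm even if tsk goes through execve meanwhile.
	 */
	return tsk->mm ? in_gate_area(tsk->mm, addr) : 0;
}
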
@@ -820,17 +820,17 @@ static int __init vdso_init(void)
 }
 arch_initcall(vdso_init);
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
...
@@ -337,17 +337,17 @@ static int __init vdso_init(void)
 }
 arch_initcall(vdso_init);
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
@@ -94,17 +94,17 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 	return NULL;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *task)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long address)
+int in_gate_area(struct mm_struct *mm, unsigned long address)
 {
 	return 0;
 }
 
-int in_gate_area_no_task(unsigned long address)
+int in_gate_area_no_mm(unsigned long address)
 {
 	return 0;
 }
@@ -298,6 +298,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	/* OK, This is the point of no return */
 	set_personality(PER_LINUX);
 	set_thread_flag(TIF_IA32);
+	current->mm->context.ia32_compat = 1;
 
 	setup_new_exec(bprm);
...
@@ -13,6 +13,12 @@ typedef struct {
 	int size;
 	struct mutex lock;
 	void *vdso;
+
+#ifdef CONFIG_X86_64
+	/* True if mm supports a task running in 32 bit compatibility mode. */
+	unsigned short ia32_compat;
+#endif
+
 } mm_context_t;
 
 #ifdef CONFIG_SMP
...
@@ -501,6 +501,10 @@ void set_personality_64bit(void)
 	/* Make sure to be in 64bit mode */
 	clear_thread_flag(TIF_IA32);
 
+	/* Ensure the corresponding mm is not marked. */
+	if (current->mm)
+		current->mm->context.ia32_compat = 0;
+
 	/* TBD: overwrites user setup. Should have two bits.
 	   But 64bit processes have always behaved this way,
 	   so it's not too bad. The main problem is just that
@@ -516,6 +520,10 @@ void set_personality_ia32(void)
 	set_thread_flag(TIF_IA32);
 	current->personality |= force_personality32;
 
+	/* Mark the associated mm as containing 32-bit tasks. */
+	if (current->mm)
+		current->mm->context.ia32_compat = 1;
+
 	/* Prepare the first "return" to user space */
 	current_thread_info()->status |= TS_COMPAT;
 }
...
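
A note on the mm-level tag set above (an editorial sketch, not part of the commit): recording compat mode on the mm_struct rather than only in per-thread flags lets code that holds just an mm make the gate-VMA decision, even after the owning task has exec'd or exited. A minimal consumer might look like the following; mm_has_gate_vma() is a hypothetical name whose logic mirrors the x86-64 get_gate_vma() change further down.

#include <linux/mm_types.h>

/* Hypothetical helper mirroring the x86-64 get_gate_vma() check below. */
static bool mm_has_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
	/* A NULL or 32-bit-tagged mm gets no 64-bit gate VMA. */
	if (!mm || mm->context.ia32_compat)
		return false;
#endif
	return true;
}
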
@@ -862,18 +862,18 @@ static struct vm_area_struct gate_vma = {
 	.vm_flags	= VM_READ | VM_EXEC
 };
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef CONFIG_IA32_EMULATION
-	if (test_tsk_thread_flag(tsk, TIF_IA32))
+	if (!mm || mm->context.ia32_compat)
 		return NULL;
 #endif
 	return &gate_vma;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
-	struct vm_area_struct *vma = get_gate_vma(task);
+	struct vm_area_struct *vma = get_gate_vma(mm);
 
 	if (!vma)
 		return 0;
@@ -882,11 +882,11 @@ int in_gate_area(struct task_struct *task, unsigned long addr)
 }
 
 /*
- * Use this when you have no reliable task/vma, typically from interrupt
- * context. It is less reliable than using the task's vma and may give
- * false positives:
+ * Use this when you have no reliable mm, typically from interrupt
+ * context. It is less reliable than using a task's mm and may give
+ * false positives.
  */
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 }
...
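
Worth spelling out for the renamed in_gate_area_no_mm() (editorial aside): with no mm to consult, the check is purely range-based and can report true for an address a particular compat-mode mm could never fault into, exactly as the comment above warns. A hedged sketch of the kind of caller this serves, modeled on the kallsyms usage later in this diff; the helper name is invented.

#include <asm/sections.h>	/* _stext, _end */

/* Hypothetical classifier usable from interrupt context (no mm at hand). */
static int addr_is_kernel_or_gate(unsigned long addr)
{
	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
		return 1;
	/* Range check only; may be a false positive for a compat-mode mm. */
	return in_gate_area_no_mm(addr);
}
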
@@ -417,24 +417,25 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 	return NULL;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
-	struct mm_struct *mm = tsk->mm;
-
-	/* Check to see if this task was created in compat vdso mode */
+	/*
+	 * Check to see if the corresponding task was created in compat vdso
+	 * mode.
+	 */
 	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
 		return &gate_vma;
 
 	return NULL;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
-	const struct vm_area_struct *vma = get_gate_vma(task);
+	const struct vm_area_struct *vma = get_gate_vma(mm);
 
 	return vma && addr >= vma->vm_start && addr < vma->vm_end;
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
...
@@ -1906,7 +1906,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	segs = current->mm->map_count;
 	segs += elf_core_extra_phdrs();
 
-	gate_vma = get_gate_vma(current);
+	gate_vma = get_gate_vma(current->mm);
 	if (gate_vma != NULL)
 		segs++;
...
@@ -1650,13 +1650,16 @@ static int path_lookupat(int dfd, const char *name,
 			err = -ECHILD;
 	}
 
-	if (!err)
+	if (!err) {
 		err = handle_reval_path(nd);
+		if (err)
+			path_put(&nd->path);
+	}
 
 	if (!err && nd->flags & LOOKUP_DIRECTORY) {
 		if (!nd->inode->i_op->lookup) {
 			path_put(&nd->path);
-			return -ENOTDIR;
+			err = -ENOTDIR;
 		}
 	}
...
This diff is collapsed.
@@ -121,14 +121,14 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 	if (!priv->task)
-		return NULL;
+		return ERR_PTR(-ESRCH);
 
 	mm = mm_for_maps(priv->task);
-	if (!mm)
-		return NULL;
+	if (!mm || IS_ERR(mm))
+		return mm;
 	down_read(&mm->mmap_sem);
 
-	tail_vma = get_gate_vma(priv->task);
+	tail_vma = get_gate_vma(priv->task->mm);
 	priv->tail_vma = tail_vma;
 
 	/* Start with last addr hint */
@@ -279,7 +279,8 @@ static int show_map(struct seq_file *m, void *v)
 	show_map_vma(m, vma);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
+		m->version = (vma != get_gate_vma(task->mm))
+			? vma->vm_start : 0;
 	return 0;
 }
@@ -468,7 +469,8 @@ static int show_smap(struct seq_file *m, void *v)
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+		m->version = (vma != get_gate_vma(task->mm))
+			? vma->vm_start : 0;
 	return 0;
 }
@@ -764,8 +766,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!task)
 		goto out;
 
-	ret = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	mm = mm_for_maps(task);
+	ret = PTR_ERR(mm);
+	if (!mm || IS_ERR(mm))
 		goto out_task;
 
 	ret = -EINVAL;
@@ -778,10 +781,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!count)
 		goto out_task;
 
-	mm = get_task_mm(task);
-	if (!mm)
-		goto out_task;
-
 	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 	ret = -ENOMEM;
...
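
An editorial sketch of the error-handling idiom these /proc changes converge on (the helper below is hypothetical): mm_for_maps() now yields either a referenced mm, NULL, or an ERR_PTR() from the permission check, so every caller has to distinguish all three cases before touching the mm.

#include <linux/err.h>

/* Hypothetical wrapper showing the NULL / ERR_PTR / valid-mm triage. */
static struct mm_struct *grab_maps_mm(struct task_struct *task, int *err)
{
	struct mm_struct *mm = mm_for_maps(task);

	if (IS_ERR(mm)) {
		*err = PTR_ERR(mm);	/* e.g. -EACCES from ptrace checks */
		return NULL;
	}
	if (!mm) {
		*err = 0;		/* no mm: treat as an empty read */
		return NULL;
	}
	return mm;			/* caller must mmput() when done */
}
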
@@ -199,13 +199,13 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	/* pin the task and mm whilst we play with them */
 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 	if (!priv->task)
-		return NULL;
+		return ERR_PTR(-ESRCH);
 
 	mm = mm_for_maps(priv->task);
-	if (!mm) {
+	if (!mm || IS_ERR(mm)) {
 		put_task_struct(priv->task);
 		priv->task = NULL;
-		return NULL;
+		return mm;
 	}
 	down_read(&mm->mmap_sem);
...
@@ -982,6 +982,8 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, int write);
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int len, unsigned int foll_flags,
@@ -1592,13 +1594,13 @@ static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
 #endif
 
-extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
 #ifdef __HAVE_ARCH_GATE_AREA
-int in_gate_area_no_task(unsigned long addr);
-int in_gate_area(struct task_struct *task, unsigned long addr);
+int in_gate_area_no_mm(unsigned long addr);
+int in_gate_area(struct mm_struct *mm, unsigned long addr);
 #else
-int in_gate_area_no_task(unsigned long addr);
-#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
+int in_gate_area_no_mm(unsigned long addr);
+#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
 int drop_caches_sysctl_handler(struct ctl_table *, int,
...
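
A small aside on the !__HAVE_ARCH_GATE_AREA branch above: the statement-expression macro keeps the two-argument call form compiling on architectures with no gate area, evaluating the mm argument and discarding it. An illustrative caller, with a hypothetical helper name, might read:

/* Hypothetical caller; compiles identically with or without a gate area.
 * Assumes the caller holds mm->mmap_sem for find_vma(). */
static int addr_mapped_or_gate(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && vma->vm_start <= addr)
		return 1;
	/* Without __HAVE_ARCH_GATE_AREA this is in_gate_area_no_mm(addr). */
	return in_gate_area(mm, addr);
}
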
@@ -64,14 +64,14 @@ static inline int is_kernel_text(unsigned long addr)
 	if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
 	    arch_is_kernel_text(addr))
 		return 1;
-	return in_gate_area_no_task(addr);
+	return in_gate_area_no_mm(addr);
 }
 
 static inline int is_kernel(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
 		return 1;
-	return in_gate_area_no_task(addr);
+	return in_gate_area_no_mm(addr);
 }
 
 static int is_ksym_addr(unsigned long addr)
...
@@ -1486,9 +1486,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		struct vm_area_struct *vma;
 
 		vma = find_extend_vma(mm, start);
-		if (!vma && in_gate_area(tsk, start)) {
+		if (!vma && in_gate_area(mm, start)) {
 			unsigned long pg = start & PAGE_MASK;
-			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+			struct vm_area_struct *gate_vma = get_gate_vma(mm);
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
@@ -1591,10 +1591,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 					return i ? i : -EFAULT;
 				BUG();
 			}
-			if (ret & VM_FAULT_MAJOR)
-				tsk->maj_flt++;
-			else
-				tsk->min_flt++;
+
+			if (tsk) {
+				if (ret & VM_FAULT_MAJOR)
+					tsk->maj_flt++;
+				else
+					tsk->min_flt++;
+			}
 
 			if (ret & VM_FAULT_RETRY) {
 				if (nonblocking)
@@ -1641,7 +1644,8 @@ EXPORT_SYMBOL(__get_user_pages);
 
 /**
  * get_user_pages() - pin user pages in memory
- * @tsk:	task_struct of target task
+ * @tsk:	the task_struct to use for page fault accounting, or
+ *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
@@ -3499,7 +3503,7 @@ static int __init gate_vma_init(void)
 __initcall(gate_vma_init);
 #endif
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef AT_SYSINFO_EHDR
 	return &gate_vma;
@@ -3508,7 +3512,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 #endif
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 #ifdef AT_SYSINFO_EHDR
 	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
@@ -3649,20 +3653,15 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 #endif
 
 /*
- * Access another process' address space.
- * Source/target buffer must be kernel space,
- * Do not walk the page table directly, use get_user_pages
+ * Access another process' address space as given in mm.  If non-NULL, use the
+ * given task for page fault accounting.
 */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long addr, void *buf, int len, int write)
 {
-	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	void *old_buf = buf;
 
-	mm = get_task_mm(tsk);
-	if (!mm)
-		return 0;
-
 	down_read(&mm->mmap_sem);
 	/* ignore errors, just check how much was successfully transferred */
 	while (len) {
@@ -3711,11 +3710,47 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 		addr += bytes;
 	}
 	up_read(&mm->mmap_sem);
-	mmput(mm);
 
 	return buf - old_buf;
 }
 
+/**
+ * @access_remote_vm - access another process' address space
+ * @mm:		the mm_struct of the target address space
+ * @addr:	start address to access
+ * @buf:	source or destination buffer
+ * @len:	number of bytes to transfer
+ * @write:	whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, int write)
+{
+	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * Source/target buffer must be kernel space,
+ * Do not walk the page table directly, use get_user_pages
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, int write)
+{
+	struct mm_struct *mm;
+	int ret;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	mmput(mm);
+
+	return ret;
+}
+
 /*
 * Print the name of a VMA.
 */
...
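
A usage note for the new access_remote_vm() (an illustrative sketch, not code from this series): the split lets a caller that has already resolved and pinned an mm, say via mm_for_maps() at open time, read the target's memory without ever going back to a task_struct, which is what closes the check-versus-use windows listed in the merge summary. The helper name below is invented; passing NULL as the task to the underlying __access_remote_vm() simply skips fault accounting.

/* Hypothetical /proc-style reader. The caller must hold a reference on
 * mm (e.g. from get_task_mm() or mm_for_maps()) until after this call. */
static int read_remote_bytes(struct mm_struct *mm, unsigned long addr,
			     char *kbuf, int len)
{
	/* Returns the number of bytes actually transferred; write = 0. */
	return access_remote_vm(mm, addr, kbuf, len, 0);
}
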
@@ -237,7 +237,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current))) {
+			vma == get_gate_vma(current->mm))) {
 
 		__mlock_vma_pages_range(vma, start, end, NULL);
@@ -332,7 +332,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	int lock = newflags & VM_LOCKED;
 
 	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
-	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))
+	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
 		goto out;	/* don't set VM_LOCKED, don't count */
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
...
@@ -1963,7 +1963,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	return -ENOMEM;
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
...