Commit faa309e7 authored by Hugh Dickins, committed by Adrian Bunk

read_zero_pagealigned() locking fix

Ramiro Voicu hits the BUG_ON(!pte_none(*pte)) in zeromap_pte_range: kernel
bugzilla 7645.  Right: read_zero_pagealigned uses down_read of mmap_sem,
but another thread's racing read of /dev/zero, or a normal fault, can
easily set that pte again, in between zap_page_range and zeromap_page_range
getting there.  It's been wrong ever since 2.4.3.
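
Concretely, with both threads holding mmap_sem only for read, the
interleaving is:

	thread A (read_zero_pagealigned)        thread B
	--------------------------------        --------
	zap_page_range(vma, addr, count, NULL)
	                                        fault (or its own /dev/zero
	                                        read) repopulates a pte
	                                        somewhere in the range
	zeromap_page_range(vma, addr, count, ...)
	  -> zeromap_pte_range() finds that pte set
	  -> BUG_ON(!pte_none(*pte)) fires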

The simple fix is to use down_write instead, but that would serialize reads
of /dev/zero more than at present: perhaps some app would be badly
affected.  So instead let zeromap_page_range return the error instead of
BUG_ON, and read_zero_pagealigned break to the slower clear_user loop in
that case - there's no need to optimize for it.
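
The shape of the fix is an optimistic fast path with a safe fallback.  A
minimal user-space sketch of the pattern (fast_fill() is a hypothetical
stand-in for the zeromap path and memset() for the clear_user loop; this
is illustrative, not the driver code):

	#include <stdio.h>
	#include <string.h>

	/* Stand-in for the mapping fast path: may bail out partway, as
	 * if a racing thread had already populated a pte; returns the
	 * number of bytes it actually handled. */
	static size_t fast_fill(char *buf, size_t size)
	{
		size_t done = size / 2;	/* pretend we lost the race halfway */

		memset(buf, 0, done);
		return done;
	}

	static void fill_zeros(char *buf, size_t size)
	{
		size_t done = fast_fill(buf, size);

		/* Slow but always-correct fallback for whatever the fast
		 * path did not cover; the racy case is too rare to be
		 * worth optimizing. */
		if (done < size)
			memset(buf + done, 0, size - done);
	}

	int main(void)
	{
		char buf[64];

		fill_zeros(buf, sizeof(buf));
		printf("zeroed %zu bytes\n", sizeof(buf));
		return 0;
	}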

Use -EEXIST for when a pte is found: BUG_ON in mmap_zero (the other user of
zeromap_page_range), though it really isn't interesting there.  And since
mmap_zero wants -EAGAIN for out-of-memory, the zeromaps better return that
than -ENOMEM.
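
So the contract after this patch: -EAGAIN means an allocation failed
somewhere in the page-table walk, -EEXIST means a populated pte was
found.  A schematic user-space mock of that contract (illustrative only;
the real functions are in the diff below):

	#include <errno.h>
	#include <stdio.h>

	/* Mock of the post-patch zeromap_* return values. */
	static const char *zeromap_errstr(int err)
	{
		switch (err) {
		case 0:
			return "all ptes set";
		case -EAGAIN:
			return "page-table allocation failed: mmap_zero returns it";
		case -EEXIST:
			return "pte already populated: only the racy /dev/zero "
			       "read can see this, so mmap_zero BUG()s on it";
		default:
			return "unexpected";
		}
	}

	int main(void)
	{
		printf("%s\n", zeromap_errstr(-EEXIST));
		return 0;
	}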
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
parent 891ff634
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -613,7 +613,8 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 			count = size;
 
 		zap_page_range(vma, addr, count, NULL);
-		zeromap_page_range(vma, addr, count, PAGE_COPY);
+		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
+			break;
 
 		size -= count;
 		buf += count;
@@ -680,11 +681,14 @@ static ssize_t read_zero(struct file * file, char __user * buf,
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+	int err;
+
 	if (vma->vm_flags & VM_SHARED)
 		return shmem_zero_setup(vma);
-	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
-	return 0;
+	err = zeromap_page_range(vma, vma->vm_start,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	BUG_ON(err == -EEXIST);
+	return err;
 }
 
 #else /* CONFIG_MMU */
 static ssize_t read_zero(struct file * file, char * buf,
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1092,21 +1092,27 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		struct page *page = ZERO_PAGE(addr);
 		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
+
+		if (unlikely(!pte_none(*pte))) {
+			err = -EEXIST;
+			pte++;
+			break;
+		}
 		page_cache_get(page);
 		page_add_file_rmap(page);
 		inc_mm_counter(mm, file_rss);
-		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, addr, pte, zero_pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1114,16 +1120,18 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		next = pmd_addr_end(addr, end);
-		if (zeromap_pte_range(mm, pmd, addr, next, prot))
-			return -ENOMEM;
+		err = zeromap_pte_range(mm, pmd, addr, next, prot);
+		if (err)
+			break;
 	} while (pmd++, addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
@@ -1131,16 +1139,18 @@ static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		next = pud_addr_end(addr, end);
-		if (zeromap_pmd_range(mm, pud, addr, next, prot))
-			return -ENOMEM;
+		err = zeromap_pmd_range(mm, pud, addr, next, prot);
+		if (err)
+			break;
 	} while (pud++, addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 int zeromap_page_range(struct vm_area_struct *vma,