Commit c36987e2 authored by Daisuke Nishimura, committed by Linus Torvalds

mm: don't call pte_unmap() against an improper pte

There are some places where we do something like:

	pte = pte_offset_map();
	do {
		(may break under some conditions)
	} while (pte++, ...);
	pte_unmap(pte - 1);

But if the loop breaks during the first iteration, the pte++ in the loop's continuation expression has not yet run, so pte_unmap(pte - 1) is passed a pointer one entry below the pte that was actually mapped, i.e. an invalid pte.

This patch fixes that.
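
A minimal sketch of the corrected pattern, in the same pseudocode style as the snippet above (orig_pte and the break condition are illustrative, not the exact kernel code):

	orig_pte = pte = pte_offset_map();
	do {
		if (some_condition)	/* may break before pte++ runs */
			break;
	} while (pte++, ...);
	pte_unmap(orig_pte);	/* always the pte that was actually mapped */

Alternatively, as done for apply_to_pte_range() in the diff below, the increment can be moved into the loop body (fn(pte++, ...)) so that pte - 1 stays valid even when the loop breaks early.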
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2545f038
@@ -641,6 +641,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end)
 {
+	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
@@ -654,6 +655,8 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+	orig_src_pte = src_pte;
+	orig_dst_pte = dst_pte;
 	arch_enter_lazy_mmu_mode();

 	do {
@@ -677,9 +680,9 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	arch_leave_lazy_mmu_mode();
 	spin_unlock(src_ptl);
-	pte_unmap_nested(src_pte - 1);
+	pte_unmap_nested(orig_src_pte);
 	add_mm_rss(dst_mm, rss[0], rss[1]);
-	pte_unmap_unlock(dst_pte - 1, dst_ptl);
+	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
 	if (addr != end)
 		goto again;
@@ -1820,10 +1823,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	token = pmd_pgtable(*pmd);

 	do {
-		err = fn(pte, token, addr, data);
+		err = fn(pte++, token, addr, data);
 		if (err)
 			break;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	} while (addr += PAGE_SIZE, addr != end);

 	arch_leave_lazy_mmu_mode();