Commit a41b70d6 authored by Jan Kara, committed by Linus Torvalds

mm: use vmf->page during WP faults

So far we set vmf->page during WP faults only when we needed to pass it
to the ->page_mkwrite handler.  Set it in all the cases now and use that
instead of passing page pointer explicitly around.

Link: http://lkml.kernel.org/r/1479460644-25076-14-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 38b8cb7f
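
For readers unfamiliar with the pattern, here is a small userspace sketch of what the commit does (illustration only: the names fault_ctx and reuse_page are invented and do not exist in the kernel). Instead of passing the faulted page to each helper as an extra argument, the caller stores it once in the fault context (vmf->page in the kernel) and the helpers read it from there.

/*
 * Userspace analogue of the refactor, for illustration only.
 * The point is the calling convention: the page travels in the
 * context struct (like vmf->page) instead of being passed as a
 * separate argument to every helper.
 */
#include <stdio.h>

struct page {
        int id;
};

struct fault_ctx {
        struct page *page;      /* plays the role of vmf->page */
};

/* After the change: helpers take only the fault context. */
static int reuse_page(struct fault_ctx *ctx)
{
        printf("reusing page %d\n", ctx->page->id);
        return 0;
}

int main(void)
{
        struct page p = { .id = 1 };
        struct fault_ctx ctx = { .page = &p };

        /* Before the change this would have been reuse_page(&ctx, &p). */
        return reuse_page(&ctx);
}

In the kernel, the same idea keeps the wp_page_* helper signatures uniform and removes the "vmf->page = old_page;" assignment that wp_page_shared() previously had to do before calling do_page_mkwrite(), as the diff below shows.
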
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2102,11 +2102,12 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
+static inline int wp_page_reuse(struct vm_fault *vmf,
                         int page_mkwrite, int dirty_shared)
         __releases(vmf->ptl)
 {
         struct vm_area_struct *vma = vmf->vma;
+        struct page *page = vmf->page;
         pte_t entry;
         /*
          * Clear the pages cpupid information as the existing
@@ -2150,10 +2151,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
  * held to the old page, as well as updating the rmap.
  * - In any case, unlock the PTL and drop the reference we took to the old page.
  */
-static int wp_page_copy(struct vm_fault *vmf, struct page *old_page)
+static int wp_page_copy(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct mm_struct *mm = vma->vm_mm;
+        struct page *old_page = vmf->page;
         struct page *new_page = NULL;
         pte_t entry;
         int page_copied = 0;
@@ -2305,26 +2307,25 @@ static int wp_pfn_shared(struct vm_fault *vmf)
                         return 0;
                 }
         }
-        return wp_page_reuse(vmf, NULL, 0, 0);
+        return wp_page_reuse(vmf, 0, 0);
 }
 
-static int wp_page_shared(struct vm_fault *vmf, struct page *old_page)
+static int wp_page_shared(struct vm_fault *vmf)
         __releases(vmf->ptl)
 {
         struct vm_area_struct *vma = vmf->vma;
         int page_mkwrite = 0;
 
-        get_page(old_page);
+        get_page(vmf->page);
 
         if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                 int tmp;
 
                 pte_unmap_unlock(vmf->pte, vmf->ptl);
-                vmf->page = old_page;
                 tmp = do_page_mkwrite(vmf);
                 if (unlikely(!tmp || (tmp &
                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-                        put_page(old_page);
+                        put_page(vmf->page);
                         return tmp;
                 }
                 /*
@@ -2336,15 +2337,15 @@ static int wp_page_shared(struct vm_fault *vmf, struct page *old_page)
                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                                vmf->address, &vmf->ptl);
                 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-                        unlock_page(old_page);
+                        unlock_page(vmf->page);
                         pte_unmap_unlock(vmf->pte, vmf->ptl);
-                        put_page(old_page);
+                        put_page(vmf->page);
                         return 0;
                 }
                 page_mkwrite = 1;
         }
 
-        return wp_page_reuse(vmf, old_page, page_mkwrite, 1);
+        return wp_page_reuse(vmf, page_mkwrite, 1);
 }
 
 /*
@@ -2369,10 +2370,9 @@ static int do_wp_page(struct vm_fault *vmf)
         __releases(vmf->ptl)
 {
         struct vm_area_struct *vma = vmf->vma;
-        struct page *old_page;
 
-        old_page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
-        if (!old_page) {
+        vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+        if (!vmf->page) {
                 /*
                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
                  * VM_PFNMAP VMA.
@@ -2385,30 +2385,30 @@ static int do_wp_page(struct vm_fault *vmf)
                         return wp_pfn_shared(vmf);
 
                 pte_unmap_unlock(vmf->pte, vmf->ptl);
-                return wp_page_copy(vmf, old_page);
+                return wp_page_copy(vmf);
         }
 
         /*
          * Take out anonymous pages first, anonymous shared vmas are
          * not dirty accountable.
          */
-        if (PageAnon(old_page) && !PageKsm(old_page)) {
+        if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
                 int total_mapcount;
-                if (!trylock_page(old_page)) {
-                        get_page(old_page);
+                if (!trylock_page(vmf->page)) {
+                        get_page(vmf->page);
                         pte_unmap_unlock(vmf->pte, vmf->ptl);
-                        lock_page(old_page);
+                        lock_page(vmf->page);
                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                         vmf->address, &vmf->ptl);
                         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-                                unlock_page(old_page);
+                                unlock_page(vmf->page);
                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
-                                put_page(old_page);
+                                put_page(vmf->page);
                                 return 0;
                         }
-                        put_page(old_page);
+                        put_page(vmf->page);
                 }
-                if (reuse_swap_page(old_page, &total_mapcount)) {
+                if (reuse_swap_page(vmf->page, &total_mapcount)) {
                         if (total_mapcount == 1) {
                                 /*
                                  * The page is all ours. Move it to
@@ -2417,24 +2417,24 @@ static int do_wp_page(struct vm_fault *vmf)
                                  * Protected against the rmap code by
                                  * the page lock.
                                  */
-                                page_move_anon_rmap(old_page, vma);
+                                page_move_anon_rmap(vmf->page, vma);
                         }
-                        unlock_page(old_page);
-                        return wp_page_reuse(vmf, old_page, 0, 0);
+                        unlock_page(vmf->page);
+                        return wp_page_reuse(vmf, 0, 0);
                 }
-                unlock_page(old_page);
+                unlock_page(vmf->page);
         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                         (VM_WRITE|VM_SHARED))) {
-                return wp_page_shared(vmf, old_page);
+                return wp_page_shared(vmf);
         }
 
         /*
          * Ok, we need to copy. Oh, well..
          */
-        get_page(old_page);
+        get_page(vmf->page);
 
         pte_unmap_unlock(vmf->pte, vmf->ptl);
-        return wp_page_copy(vmf, old_page);
+        return wp_page_copy(vmf);
 }
 
 static void unmap_mapping_range_vma(struct vm_area_struct *vma,