Commit 5fcd079a authored by Matthew Wilcox (Oracle), committed by Andrew Morton

uprobes: use folios more widely in __replace_page()

Remove a few hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-45-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 98b211d6
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/rmap.h> /* anon_vma_prepare */ #include <linux/rmap.h> /* anon_vma_prepare */
#include <linux/mmu_notifier.h> /* set_pte_at_notify */ #include <linux/mmu_notifier.h> /* set_pte_at_notify */
#include <linux/swap.h> /* try_to_free_swap */ #include <linux/swap.h> /* folio_free_swap */
#include <linux/ptrace.h> /* user_enable_single_step */ #include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */ #include <linux/kdebug.h> /* notifier mechanism */
#include "../../mm/internal.h" /* munlock_vma_page */ #include "../../mm/internal.h" /* munlock_vma_page */
...@@ -154,8 +154,9 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) ...@@ -154,8 +154,9 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
static int __replace_page(struct vm_area_struct *vma, unsigned long addr, static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page) struct page *old_page, struct page *new_page)
{ {
struct folio *old_folio = page_folio(old_page);
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0); DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
int err; int err;
struct mmu_notifier_range range; struct mmu_notifier_range range;
...@@ -169,8 +170,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, ...@@ -169,8 +170,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
return err; return err;
} }
/* For try_to_free_swap() below */ /* For folio_free_swap() below */
lock_page(old_page); folio_lock(old_folio);
mmu_notifier_invalidate_range_start(&range); mmu_notifier_invalidate_range_start(&range);
err = -EAGAIN; err = -EAGAIN;
...@@ -186,7 +187,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, ...@@ -186,7 +187,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
/* no new page, just dec_mm_counter for old_page */ /* no new page, just dec_mm_counter for old_page */
dec_mm_counter(mm, MM_ANONPAGES); dec_mm_counter(mm, MM_ANONPAGES);
if (!PageAnon(old_page)) { if (!folio_test_anon(old_folio)) {
dec_mm_counter(mm, mm_counter_file(old_page)); dec_mm_counter(mm, mm_counter_file(old_page));
inc_mm_counter(mm, MM_ANONPAGES); inc_mm_counter(mm, MM_ANONPAGES);
} }
...@@ -198,15 +199,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, ...@@ -198,15 +199,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mk_pte(new_page, vma->vm_page_prot)); mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, vma, false); page_remove_rmap(old_page, vma, false);
if (!page_mapped(old_page)) if (!folio_mapped(old_folio))
try_to_free_swap(old_page); folio_free_swap(old_folio);
page_vma_mapped_walk_done(&pvmw); page_vma_mapped_walk_done(&pvmw);
put_page(old_page); folio_put(old_folio);
err = 0; err = 0;
unlock: unlock:
mmu_notifier_invalidate_range_end(&range); mmu_notifier_invalidate_range_end(&range);
unlock_page(old_page); folio_unlock(old_folio);
return err; return err;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment