Commit 09c55050 authored by David Hildenbrand, committed by Andrew Morton

mm/rmap: pass folio to hugepage_add_anon_rmap()

Let's pass a folio; we are always mapping the entire thing.

Link: https://lkml.kernel.org/r/20230913125113.313322-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 132b180f
...@@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, ...@@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
void page_remove_rmap(struct page *, struct vm_area_struct *, void page_remove_rmap(struct page *, struct vm_area_struct *,
bool compound); bool compound);
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags); unsigned long address, rmap_t flags);
void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *, void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address); unsigned long address);
......
...@@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio, ...@@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio,
pte = arch_make_huge_pte(pte, shift, vma->vm_flags); pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
if (folio_test_anon(folio)) if (folio_test_anon(folio))
hugepage_add_anon_rmap(new, vma, pvmw.address, hugepage_add_anon_rmap(folio, vma, pvmw.address,
rmap_flags); rmap_flags);
else else
page_dup_file_rmap(new, true); page_dup_file_rmap(new, true);
......
...@@ -2542,18 +2542,16 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) ...@@ -2542,18 +2542,16 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
* *
* RMAP_COMPOUND is ignored. * RMAP_COMPOUND is ignored.
*/ */
/*
 * Take an additional anon rmap reference on a hugetlb folio that is
 * already mapped into @vma at @address.  The whole folio is mapped as
 * one unit, so only the entire mapcount is bumped; RMAP_COMPOUND in
 * @flags is ignored, and RMAP_EXCLUSIVE marks the folio as exclusively
 * owned by this anon_vma.
 */
void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	/* Head page stands in for the folio in the Page* flag helpers. */
	struct page *head = &folio->page;

	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
	if (flags & RMAP_EXCLUSIVE)
		SetPageAnonExclusive(head);
	/* An exclusive folio must not be mapped more than once. */
	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
			 PageAnonExclusive(head), folio);
}
void hugepage_add_new_anon_rmap(struct folio *folio, void hugepage_add_new_anon_rmap(struct folio *folio,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment