Commit 73eab3ca authored by Kefeng Wang, committed by Andrew Morton

mm: migrate: convert migrate_misplaced_page() to migrate_misplaced_folio()

At present, NUMA balancing only supports base pages and PMD-mapped THP, but
we will extend it to migrate large folios / pte-mapped THP in the future.  It
is better to make migrate_misplaced_page() take a folio instead of a page,
and to rename it to migrate_misplaced_folio().  This is a preparation step,
and it also removes several compound_head() calls.

Link: https://lkml.kernel.org/r/20230913095131.2426871-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2ac9e99f
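
As an illustration of the call-site conversion below (a sketch, not part of
the patch): callers that still hold a struct page wrap it in page_folio(),
which resolves the head page once, after which the folio_* accessors inside
migrate_misplaced_folio() avoid the repeated compound_head() lookups that
several page_*() helpers performed internally.

	/* before: page_*() helpers re-derive the head page internally */
	migrated = migrate_misplaced_page(page, vma, target_nid);

	/* after: page_folio() resolves the head page once up front */
	migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid);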
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-			   int node);
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+			    int node);
 #else
-static inline int migrate_misplaced_page(struct page *page,
-					 struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_folio(struct folio *folio,
+					  struct vm_area_struct *vma, int node)
 {
 	return -EAGAIN; /* can't migrate now */
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1567,7 +1567,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	spin_unlock(vmf->ptl);
 	writable = false;
 
-	migrated = migrate_misplaced_page(page, vma, target_nid);
+	migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid);
 	if (migrated) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4812,7 +4812,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	writable = false;
 
 	/* Migrate to the requested node */
-	if (migrate_misplaced_page(page, vma, target_nid)) {
+	if (migrate_misplaced_folio(page_folio(page), vma, target_nid)) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
 	} else {
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2516,55 +2516,58 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 }
 
 /*
- * Attempt to migrate a misplaced page to the specified destination
+ * Attempt to migrate a misplaced folio to the specified destination
  * node. Caller is expected to have an elevated reference count on
- * the page that will be dropped by this function before returning.
+ * the folio that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-			   int node)
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+			    int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
-	int nr_pages = thp_nr_pages(page);
+	int nr_pages = folio_nr_pages(folio);
 
 	/*
-	 * Don't migrate file pages that are mapped in multiple processes
+	 * Don't migrate file folios that are mapped in multiple processes
 	 * with execute permissions as they are probably shared libraries.
+	 * To check if the folio is shared, ideally we want to make sure
+	 * every page is mapped to the same process. Doing that is very
+	 * expensive, so check the estimated mapcount of the folio instead.
 	 */
-	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
 	    (vma->vm_flags & VM_EXEC))
 		goto out;
 
 	/*
-	 * Also do not migrate dirty pages as not all filesystems can move
-	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
+	 * Also do not migrate dirty folios as not all filesystems can move
+	 * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
 	 */
-	if (page_is_file_lru(page) && PageDirty(page))
+	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
 		goto out;
 
-	isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
+	isolated = numamigrate_isolate_folio(pgdat, folio);
 	if (!isolated)
 		goto out;
 
-	list_add(&page->lru, &migratepages);
+	list_add(&folio->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
 				     NULL, node, MIGRATE_ASYNC,
 				     MR_NUMA_MISPLACED, &nr_succeeded);
 	if (nr_remaining) {
 		if (!list_empty(&migratepages)) {
-			list_del(&page->lru);
-			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -nr_pages);
-			putback_lru_page(page);
+			list_del(&folio->lru);
+			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
+					folio_is_file_lru(folio), -nr_pages);
+			folio_putback_lru(folio);
 		}
 		isolated = 0;
 	}
 	if (nr_succeeded) {
 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
-		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
 					    nr_succeeded);
 	}
@@ -2572,7 +2575,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	return isolated;
 
 out:
-	put_page(page);
+	folio_put(folio);
 	return 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */
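
For context on the new sharing check (an assumption about this kernel
generation, not text from this patch): folio_estimated_sharers() approximates
"mapped by more than one process" by sampling only the mapcount of the
folio's first page, which is far cheaper than walking every subpage and
matches the reasoning in the comment added above. A minimal sketch of that
estimate:

	/* sketch of the ~v6.6 helper; samples one page's mapcount */
	static inline int folio_estimated_sharers(struct folio *folio)
	{
		return page_mapcount(folio_page(folio, 0));
	}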