Commit d986ba2b authored by Kefeng Wang, committed by Andrew Morton

mm: huge_memory: use a folio in change_huge_pmd()

Use a folio in change_huge_pmd(), which helps to remove the last
xchg_page_access_time() caller.

Link: https://lkml.kernel.org/r/20231018140806.2783514-11-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ec177880
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1856,7 +1856,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	if (is_swap_pmd(*pmd)) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
-		struct page *page = pfn_swap_entry_to_page(entry);
+		struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
 		pmd_t newpmd;
 
 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
@@ -1865,7 +1865,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * A protection check is difficult so
 			 * just be safe and disable write
 			 */
-			if (PageAnon(page))
+			if (folio_test_anon(folio))
 				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
 			else
 				entry = make_readable_migration_entry(swp_offset(entry));
@@ -1887,7 +1887,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #endif
 
 	if (prot_numa) {
-		struct page *page;
+		struct folio *folio;
 		bool toptier;
 		/*
 		 * Avoid trapping faults against the zero page. The read-only
@@ -1900,8 +1900,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		page = pmd_page(*pmd);
-		toptier = node_is_toptier(page_to_nid(page));
+		folio = page_folio(pmd_page(*pmd));
+		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
 		 * balancing is disabled
@@ -1912,7 +1912,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
-			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
+			folio_xchg_access_time(folio,
+					       jiffies_to_msecs(jiffies));
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm).  It's critical
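
For readers unfamiliar with the folio conversion, the pattern in this diff is: resolve the folio once with page_folio(), then use folio accessors (folio_test_anon(), folio_nid(), folio_xchg_access_time()) in place of the page-based helpers (PageAnon(), page_to_nid(), xchg_page_access_time()). The sketch below is a minimal userspace illustration of that pattern only; the struct layouts and helper bodies are simplified stand-ins, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct folio and struct page. */
struct folio {
	int nid;               /* NUMA node id */
	bool anon;             /* anonymous memory? */
	unsigned int atime_ms; /* last recorded access time, in ms */
};

struct page {
	struct folio *head;    /* every page belongs to exactly one folio */
};

/* Stand-in for page_folio(): resolve a page to its containing folio. */
static struct folio *page_folio(struct page *page)
{
	return page->head;
}

static bool folio_test_anon(const struct folio *folio) { return folio->anon; }
static int folio_nid(const struct folio *folio)        { return folio->nid; }

/*
 * Stand-in for folio_xchg_access_time(): record the new access time and
 * return the previous value (the real kernel helper does this atomically).
 */
static unsigned int folio_xchg_access_time(struct folio *folio, unsigned int msecs)
{
	unsigned int old = folio->atime_ms;

	folio->atime_ms = msecs;
	return old;
}

int main(void)
{
	struct folio f = { .nid = 1, .anon = true, .atime_ms = 0 };
	struct page p = { .head = &f };

	/* Resolve the folio once, then use folio accessors throughout. */
	struct folio *folio = page_folio(&p);

	printf("anon=%d nid=%d old_time=%u\n",
	       folio_test_anon(folio), folio_nid(folio),
	       folio_xchg_access_time(folio, 1000));
	return 0;
}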