Commit 43849758 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

khugepaged: use a folio throughout hpage_collapse_scan_file()

Replace the use of pages with folios.  Saves a few calls to
compound_head() and removes some uses of obsolete functions.

Link: https://lkml.kernel.org/r/20240403171838.1445826-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8d1e24c0
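
Editor's note: the conversion follows a mechanical pattern. A folio always refers to a head page, so helpers that previously had to resolve compound_head() become direct folio accessors. The fragment below is an illustrative sketch of that pattern, not code from this commit; it only contrasts the old and new idioms using the same kernel APIs that appear in the diff.

	/* Before: a tail page must be resolved to its head first. */
	if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		order = compound_order(head);
	}

	/* After: a folio never refers to a tail page, so no compound_head(). */
	if (folio_test_large(folio))
		order = folio_order(folio);

The same substitution drives the rest of the patch: page_to_nid() becomes folio_nid(), PageLRU() becomes folio_test_lru(), page_count() becomes folio_ref_count(), and so on.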
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -174,10 +174,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
 
 TRACE_EVENT(mm_khugepaged_scan_file,
 
-	TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
+	TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file,
 		 int present, int swap, int result),
 
-	TP_ARGS(mm, page, file, present, swap, result),
+	TP_ARGS(mm, folio, file, present, swap, result),
 
 	TP_STRUCT__entry(
 		__field(struct mm_struct *, mm)
@@ -190,7 +190,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
 
 	TP_fast_assign(
 		__entry->mm = mm;
-		__entry->pfn = page ? page_to_pfn(page) : -1;
+		__entry->pfn = folio ? folio_pfn(folio) : -1;
 		__assign_str(filename, file->f_path.dentry->d_iname);
 		__entry->present = present;
 		__entry->swap = swap;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2203,7 +2203,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 				    struct file *file, pgoff_t start,
 				    struct collapse_control *cc)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct address_space *mapping = file->f_mapping;
 	XA_STATE(xas, &mapping->i_pages, start);
 	int present, swap;
@@ -2215,11 +2215,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
 	rcu_read_lock();
-	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
-		if (xas_retry(&xas, page))
+	xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
+		if (xas_retry(&xas, folio))
 			continue;
 
-		if (xa_is_value(page)) {
+		if (xa_is_value(folio)) {
 			++swap;
 			if (cc->is_khugepaged &&
 			    swap > khugepaged_max_ptes_swap) {
@@ -2234,11 +2234,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 		 * TODO: khugepaged should compact smaller compound pages
 		 * into a PMD sized page
 		 */
-		if (PageTransCompound(page)) {
-			struct page *head = compound_head(page);
-
-			result = compound_order(head) == HPAGE_PMD_ORDER &&
-					head->index == start
+		if (folio_test_large(folio)) {
+			result = folio_order(folio) == HPAGE_PMD_ORDER &&
+					folio->index == start
 					/* Maybe PMD-mapped */
 					? SCAN_PTE_MAPPED_HUGEPAGE
 					: SCAN_PAGE_COMPOUND;
@@ -2251,28 +2249,29 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 			break;
 		}
 
-		node = page_to_nid(page);
+		node = folio_nid(folio);
 		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			break;
 		}
 		cc->node_load[node]++;
 
-		if (!PageLRU(page)) {
+		if (!folio_test_lru(folio)) {
 			result = SCAN_PAGE_LRU;
 			break;
 		}
 
-		if (page_count(page) !=
-		    1 + page_mapcount(page) + page_has_private(page)) {
+		if (folio_ref_count(folio) !=
+		    1 + folio_mapcount(folio) + folio_test_private(folio)) {
 			result = SCAN_PAGE_COUNT;
 			break;
 		}
 
 		/*
-		 * We probably should check if the page is referenced here, but
-		 * nobody would transfer pte_young() to PageReferenced() for us.
-		 * And rmap walk here is just too costly...
+		 * We probably should check if the folio is referenced
+		 * here, but nobody would transfer pte_young() to
+		 * folio_test_referenced() for us. And rmap walk here
+		 * is just too costly...
 		 */
 
 		present++;
@@ -2294,7 +2293,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 		}
 	}
 
-	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
+	trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
 	return result;
 }
 #else