mm/gup: Convert gup_pte_range() to use a folio

We still call try_grab_folio() once per PTE; a future patch could
optimise to just adjust the reference count for each page within
the folio.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2232,7 +2232,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 	ptem = ptep = pte_offset_map(&pmd, addr);
 	do {
 		pte_t pte = ptep_get_lockless(ptep);
-		struct page *head, *page;
+		struct page *page;
+		struct folio *folio;
 
 		/*
 		 * Similar to the PMD case below, NUMA hinting must take slow
@@ -2259,22 +2260,20 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 
-		head = try_grab_compound_head(page, 1, flags);
-		if (!head)
+		folio = try_grab_folio(page, 1, flags);
+		if (!folio)
 			goto pte_unmap;
 
 		if (unlikely(page_is_secretmem(page))) {
-			put_compound_head(head, 1, flags);
+			gup_put_folio(folio, 1, flags);
 			goto pte_unmap;
 		}
 
 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-			put_compound_head(head, 1, flags);
+			gup_put_folio(folio, 1, flags);
 			goto pte_unmap;
 		}
 
-		VM_BUG_ON_PAGE(compound_head(page) != head, page);
-
 		/*
 		 * We need to make the page accessible if and only if we are
 		 * going to access its content (the FOLL_PIN case). Please
@@ -2284,14 +2283,13 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 		if (flags & FOLL_PIN) {
 			ret = arch_make_page_accessible(page);
 			if (ret) {
-				unpin_user_page(page);
+				gup_put_folio(folio, 1, flags);
 				goto pte_unmap;
 			}
 		}
-		SetPageReferenced(page);
+		folio_set_referenced(folio);
 		pages[*nr] = page;
 		(*nr)++;
 	} while (ptep++, addr += PAGE_SIZE, addr != end);
 
 	ret = 1;
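As a rough illustration of the optimisation hinted at in the commit message above, a later change could take all the references for a run of PTEs that map pages of the same folio with a single try_grab_folio() call, rather than one call per PTE. The sketch below is not from this series: gup_grab_folio_run() and its structure are hypothetical, while try_grab_folio() is the helper this patch already uses.

/*
 * Hypothetical sketch, not part of this patch: adjust the folio's
 * reference (or pin) count once for nr_pages consecutive pages that
 * all belong to the same folio, instead of calling try_grab_folio()
 * once per PTE.  The helper name and loop structure are assumptions.
 */
static int gup_grab_folio_run(struct page *page, int nr_pages,
			      unsigned int flags, struct page **pages,
			      int *nr)
{
	struct folio *folio;
	int i;

	/* Take all nr_pages references on the folio in one call. */
	folio = try_grab_folio(page, nr_pages, flags);
	if (!folio)
		return 0;

	/* The caller still records each small page individually. */
	for (i = 0; i < nr_pages; i++)
		pages[(*nr)++] = page + i;

	return nr_pages;
}

A real version would also need to recheck that each PTE still maps the expected page (and drop the references with gup_put_folio() if not), just as gup_pte_range() does for the single-page case in the diff above.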