filemap: Use a folio in filemap_map_pages

Saves 61 bytes due to fewer calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
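Why the conversion shrinks the text: page-based helpers such as unlock_page() and put_page() may be handed a tail page of a compound page, so each call must first resolve the head page via compound_head() before doing any work. A folio, by definition, is never a tail page, so the folio_*() variants skip that lookup. A minimal sketch of the distinction (simplified, not the exact kernel source):

/*
 * Simplified sketch: a page-based helper has to normalise its
 * argument, because the caller may pass any sub-page of a
 * compound page.
 */
void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));	/* page_folio() costs a compound_head() */
}

/*
 * The folio-based helper acts on its argument directly; a folio
 * cannot be a tail page.  Each unlock_page()/put_page() in the
 * loop below that becomes folio_unlock()/folio_put() drops one
 * such lookup, which is where the 61 saved bytes come from.
 */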
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3227,7 +3227,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 	return false;
 }
 
-static struct page *next_uptodate_page(struct folio *folio,
+static struct folio *next_uptodate_page(struct folio *folio,
 				       struct address_space *mapping,
 				       struct xa_state *xas, pgoff_t end_pgoff)
 {
@@ -3258,7 +3258,7 @@ static struct page *next_uptodate_page(struct folio *folio,
 	max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
 	if (xas->xa_index >= max_idx)
 		goto unlock;
-	return &folio->page;
+	return folio;
 unlock:
 	folio_unlock(folio);
 skip:
@@ -3268,7 +3268,7 @@ static struct page *next_uptodate_page(struct folio *folio,
 	return NULL;
 }
 
-static inline struct page *first_map_page(struct address_space *mapping,
+static inline struct folio *first_map_page(struct address_space *mapping,
 					  struct xa_state *xas,
 					  pgoff_t end_pgoff)
 {
@@ -3276,7 +3276,7 @@ static inline struct page *first_map_page(struct address_space *mapping,
 				  mapping, xas, end_pgoff);
 }
 
-static inline struct page *next_map_page(struct address_space *mapping,
+static inline struct folio *next_map_page(struct address_space *mapping,
 					 struct xa_state *xas,
 					 pgoff_t end_pgoff)
 {
@@ -3293,16 +3293,17 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
-	struct page *head, *page;
+	struct folio *folio;
+	struct page *page;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
 
 	rcu_read_lock();
-	head = first_map_page(mapping, &xas, end_pgoff);
-	if (!head)
+	folio = first_map_page(mapping, &xas, end_pgoff);
+	if (!folio)
 		goto out;
 
-	if (filemap_map_pmd(vmf, head)) {
+	if (filemap_map_pmd(vmf, &folio->page)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
@@ -3310,7 +3311,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
-		page = find_subpage(head, xas.xa_index);
+		page = folio_file_page(folio, xas.xa_index);
 		if (PageHWPoison(page))
 			goto unlock;
 
@@ -3331,12 +3332,12 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		do_set_pte(vmf, page, addr);
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, addr, vmf->pte);
-		unlock_page(head);
+		folio_unlock(folio);
 		continue;
 unlock:
-		unlock_page(head);
-		put_page(head);
-	} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+		folio_unlock(folio);
+		folio_put(folio);
+	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
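For reference, folio_file_page() replaces find_subpage() in the loop above: given a file index, it returns the corresponding page within the folio. A simplified sketch of roughly how the helper works (it relies on folios spanning a power-of-two number of pages):

static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* Mask the file index down to an offset inside this folio. */
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}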