Commit 412ad5fb authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: remove vma_address()

Convert the three remaining callers to call vma_pgoff_address() directly. 
This removes an ambiguity where we'd check just one page if passed a tail
page and all N pages if passed a head page.

Also add better kernel-doc for vma_pgoff_address().

Link: https://lkml.kernel.org/r/20240328225831.1765286-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7e834741
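
For context on the ambiguity the message describes: the old vma_address()
derived the number of pages to check from compound_nr(page), which returns
the full folio size for a head page but 1 for a tail page, so the range
checked depended on which subpage the caller happened to hold. A minimal
userspace model of that behaviour (the struct and values below are
simplified stand-ins, not kernel code):

#include <stdio.h>

/* Model of a page that knows its position inside its folio. */
struct page_model {
	unsigned long folio_index;	/* pgoff of the folio's first page */
	unsigned long folio_nr;		/* pages in the folio */
	unsigned long idx;		/* this page's index within the folio */
};

/* compound_nr() analogue: full size for a head page, 1 for a tail page. */
static unsigned long model_compound_nr(const struct page_model *p)
{
	return p->idx == 0 ? p->folio_nr : 1;
}

int main(void)
{
	struct page_model head = { .folio_index = 100, .folio_nr = 8, .idx = 0 };
	struct page_model tail = { .folio_index = 100, .folio_nr = 8, .idx = 3 };

	/* Old vma_address(): N pages for a head page, one for a tail page. */
	printf("head page: %lu page(s) checked\n", model_compound_nr(&head));
	printf("tail page: %lu page(s) checked\n", model_compound_nr(&tail));

	/* The converted callers instead pass pgoff and nr_pages explicitly,
	 * e.g. (100 + 3, 1) for exactly one page, or (100, 8) for all. */
	return 0;
}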
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -804,9 +804,14 @@ void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
-/*
- * Return the start of user virtual address at the specific offset within
- * a vma.
+/**
+ * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * @pgoff: The page offset within its object.
+ * @nr_pages: The number of pages to consider.
+ * @vma: The vma which maps this object.
+ *
+ * If any page in this range is mapped by this VMA, return the first address
+ * where any of these pages appear. Otherwise, return -EFAULT.
  */
 static inline unsigned long
 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
@@ -829,18 +834,6 @@ vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
 	return address;
 }
 
-/*
- * Return the start of user virtual address of a page within a vma.
- * Returns -EFAULT if all of the page is outside the range of vma.
- * If page is a compound head, the entire compound page is considered.
- */
-static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
-{
-	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
-	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
-}
-
 /*
  * Then at what user virtual address will none of the range be found in vma?
  * Assumes that vma_address() already returned a good starting address.
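
The body of vma_pgoff_address() is elided between the two hunks above. Going
only by the new kernel-doc ("if any page in this range is mapped by this VMA,
return the first address where any of these pages appear"), a rough userspace
model of the check might look like the following; the MODEL_* names and the
simplified vma fields are assumptions for illustration, not the kernel's
exact code:

#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_EFAULT ((unsigned long)-14)

/* Minimal vma model: just the fields the address computation needs. */
struct vma_model {
	unsigned long vm_start;	/* first mapped virtual address */
	unsigned long vm_end;	/* one past the last mapped address */
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/* If any page of [pgoff, pgoff + nr_pages) is mapped by the vma, return
 * the first address where the range appears; otherwise an EFAULT-like
 * failure value, matching the documented semantics. */
static unsigned long model_vma_pgoff_address(unsigned long pgoff,
		unsigned long nr_pages, const struct vma_model *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << MODEL_PAGE_SHIFT);
		if (address < vma->vm_start || address >= vma->vm_end)
			address = MODEL_EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Range starts before the vma but overlaps it. */
		address = vma->vm_start;
	} else {
		address = MODEL_EFAULT;
	}
	return address;
}

int main(void)
{
	/* Maps file pages [10, 20) at virtual [0x10000, 0x1a000). */
	struct vma_model vma = { 0x10000, 0x1a000, 10 };

	printf("%#lx\n", model_vma_pgoff_address(12, 1, &vma)); /* 0x12000 */
	printf("%#lx\n", model_vma_pgoff_address(8, 4, &vma));  /* 0x10000 */
	printf("%#lx\n", model_vma_pgoff_address(30, 2, &vma)); /* EFAULT */
	return 0;
}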
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -775,6 +775,8 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	struct folio *folio = page_folio(page);
+	pgoff_t pgoff;
+
 	if (folio_test_anon(folio)) {
 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 		/*
@@ -790,7 +792,9 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	}
 
-	return vma_address(page, vma);
+	/* The !page__anon_vma above handles KSM folios */
+	pgoff = folio->index + folio_page_idx(folio, page);
+	return vma_pgoff_address(pgoff, 1, vma);
 }
 
 /*
@@ -2588,7 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_address(&folio->page, vma);
+		unsigned long address = vma_pgoff_address(pgoff_start,
+				folio_nr_pages(folio), vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2649,7 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_address(&folio->page, vma);
+		unsigned long address = vma_pgoff_address(pgoff_start,
+				folio_nr_pages(folio), vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
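
Both walkers now pass the folio's full range explicitly; assuming pgoff_start
comes from folio_pgoff(folio) in the surrounding (elided) walker code, a range
that begins before a vma but overlaps it still resolves to the vma's first
address, so partially-mapped large folios keep working. A small self-contained
illustration of that case, reusing the same model logic as the sketch above:

#include <stdio.h>

#define MODEL_PAGE_SHIFT 12

int main(void)
{
	/* Model: an 8-page folio at file index 100, walked against a vma
	 * that maps only file pages [104, 112) at 0x40000 -- the folio is
	 * partially mapped and its head page lies outside the vma. */
	unsigned long vm_start = 0x40000, vm_pgoff = 104;
	unsigned long pgoff_start = 100, nr = 8;
	unsigned long address;

	if (pgoff_start >= vm_pgoff)
		address = vm_start +
			((pgoff_start - vm_pgoff) << MODEL_PAGE_SHIFT);
	else if (pgoff_start + nr - 1 >= vm_pgoff)
		address = vm_start;	/* overlap begins at the vma start */
	else
		address = (unsigned long)-14;	/* EFAULT-like */

	printf("first mapped address: %#lx\n", address); /* 0x40000 */
	return 0;
}

With nr_pages taken from folio_nr_pages(), the result no longer depends on
whether the caller passed &folio->page or a tail page.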