Commit 37bc2ff5 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: return the address from page_mapped_in_vma()

The only user of this function calls page_address_in_vma() immediately
afterwards, even though page_mapped_in_vma() has already calculated the
address and used it to return true/false.
Return the address instead, allowing memory-failure to skip the call to
page_address_in_vma().

Link: https://lkml.kernel.org/r/20240412193510.2356957-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f2b37197
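
The payoff is that the address is computed once instead of twice per VMA:
page_mapped_in_vma() already walks the page tables to find the mapping, so
returning the address lets the caller drop its follow-up call to
page_address_in_vma(). A minimal before/after sketch of the caller pattern,
condensed from the hunks below:

	/* Before: test for a mapping, then recompute the address */
	if (!page_mapped_in_vma(page, vma))
		continue;
	add_to_kill_anon_file(t, page, vma, to_kill);	/* called page_address_in_vma(p, vma) internally */

	/* After: one call yields the address, or -EFAULT if unmapped */
	addr = page_mapped_in_vma(page, vma);
	add_to_kill_anon_file(t, page, vma, to_kill, addr);	/* returns early when addr == -EFAULT */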
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -730,7 +730,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 
 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -473,10 +473,11 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 }
 
 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
-				  struct vm_area_struct *vma,
-				  struct list_head *to_kill)
+				  struct vm_area_struct *vma, struct list_head *to_kill,
+				  unsigned long addr)
 {
-	unsigned long addr = page_address_in_vma(p, vma);
+	if (addr == -EFAULT)
+		return;
 	__add_to_kill(tsk, p, vma, to_kill, addr);
 }
 
@@ -601,7 +602,6 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 static void collect_procs_anon(struct folio *folio, struct page *page,
 		struct list_head *to_kill, int force_early)
 {
-	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
 	pgoff_t pgoff;
@@ -613,8 +613,10 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 	pgoff = page_to_pgoff(page);
 	rcu_read_lock();
 	for_each_process(tsk) {
+		struct vm_area_struct *vma;
 		struct anon_vma_chain *vmac;
 		struct task_struct *t = task_early_kill(tsk, force_early);
+		unsigned long addr;
 
 		if (!t)
 			continue;
@@ -623,9 +625,8 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 			vma = vmac->vma;
 			if (vma->vm_mm != t->mm)
 				continue;
-			if (!page_mapped_in_vma(page, vma))
-				continue;
-			add_to_kill_anon_file(t, page, vma, to_kill);
+			addr = page_mapped_in_vma(page, vma);
+			add_to_kill_anon_file(t, page, vma, to_kill, addr);
 		}
 	}
 	rcu_read_unlock();
@@ -648,6 +649,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 	pgoff = page_to_pgoff(page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
+		unsigned long addr;
 
 		if (!t)
 			continue;
@@ -660,8 +662,10 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 			 * Assume applications who requested early kill want
 			 * to be informed of all such data corruptions.
 			 */
-			if (vma->vm_mm == t->mm)
-				add_to_kill_anon_file(t, page, vma, to_kill);
+			if (vma->vm_mm != t->mm)
+				continue;
+			addr = page_address_in_vma(page, vma);
+			add_to_kill_anon_file(t, page, vma, to_kill, addr);
 		}
 	}
 	rcu_read_unlock();
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -319,11 +319,12 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
  * @page: the page to test
  * @vma: the VMA to test
  *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA.  Only
- * valid for normal file or anonymous VMAs.
+ * Return: The address the page is mapped at if the page is in the range
+ * covered by the VMA and present in the page table.  If the page is
+ * outside the VMA or not present, returns -EFAULT.
+ * Only valid for normal file or anonymous VMAs.
  */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	struct folio *folio = page_folio(page);
 	pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
@@ -336,9 +337,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 
 	pvmw.address = vma_address(vma, pgoff, 1);
 	if (pvmw.address == -EFAULT)
-		return 0;
+		goto out;
 	if (!page_vma_mapped_walk(&pvmw))
-		return 0;
+		return -EFAULT;
 	page_vma_mapped_walk_done(&pvmw);
-	return 1;
+out:
+	return pvmw.address;
 }
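
Note the sentinel convention: the return type is unsigned long, yet failure
is signalled with the negative errno -EFAULT. The caller's check works
because C's usual arithmetic conversions turn -EFAULT into the same large
unsigned value on both sides of the comparison, a value no valid
page-aligned user address can take. A standalone sketch of the convention
(userspace, hypothetical names, not kernel code):

#include <stdio.h>

#define EFAULT 14			/* same numeric value as the kernel errno */

/* Hypothetical stand-in for page_mapped_in_vma(): returns a mapped
 * address, or -EFAULT encoded in an unsigned long when unmapped. */
static unsigned long lookup_address(int mapped)
{
	if (!mapped)
		return -EFAULT;		/* converts to ULONG_MAX - 13 */
	return 0x7f0000001000UL;	/* an arbitrary example address */
}

int main(void)
{
	unsigned long addr = lookup_address(0);

	/* -EFAULT converts to the identical unsigned value here,
	 * so this comparison reliably detects the failure case. */
	if (addr == -EFAULT)
		printf("not mapped\n");
	else
		printf("mapped at %#lx\n", addr);
	return 0;
}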