Commit f7091ed6 authored by Haiyue Wang's avatar Haiyue Wang Committed by Andrew Morton

mm: fix the handling Non-LRU pages returned by follow_page

The handling of non-LRU pages returned by follow_page() jumps directly to
the next iteration without calling put_page() to drop the page reference,
even though the 'FOLL_GET' flag passed to follow_page() causes get_page()
to be called.  Fix the zone device page checks by handling the page
reference count correctly before returning.

And as David reviewed, "device pages are never PageKsm pages".  Drop this
zone device page check for break_ksm().

Since a zone device page can't be a transparent huge page, drop the
redundant zone device page check for split_huge_pages_pid().  (by Miaohe)

Link: https://lkml.kernel.org/r/20220823135841.934465-3-haiyue.wang@intel.com
Fixes: 3218f871 ("mm: handling Non-LRU pages returned by vm_normal_pages")
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alex Sierra <alex.sierra@amd.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ca3d76b0
...@@ -3001,7 +3001,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, ...@@ -3001,7 +3001,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
/* FOLL_DUMP to ignore special (like zero) pages */ /* FOLL_DUMP to ignore special (like zero) pages */
page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) if (IS_ERR_OR_NULL(page))
continue; continue;
if (!is_transparent_hugepage(page)) if (!is_transparent_hugepage(page))
......
...@@ -475,7 +475,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) ...@@ -475,7 +475,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
cond_resched(); cond_resched();
page = follow_page(vma, addr, page = follow_page(vma, addr,
FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE); FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) if (IS_ERR_OR_NULL(page))
break; break;
if (PageKsm(page)) if (PageKsm(page))
ret = handle_mm_fault(vma, addr, ret = handle_mm_fault(vma, addr,
...@@ -560,12 +560,15 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) ...@@ -560,12 +560,15 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
goto out; goto out;
page = follow_page(vma, addr, FOLL_GET); page = follow_page(vma, addr, FOLL_GET);
if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) if (IS_ERR_OR_NULL(page))
goto out; goto out;
if (is_zone_device_page(page))
goto out_putpage;
if (PageAnon(page)) { if (PageAnon(page)) {
flush_anon_page(vma, page, addr); flush_anon_page(vma, page, addr);
flush_dcache_page(page); flush_dcache_page(page);
} else { } else {
out_putpage:
put_page(page); put_page(page);
out: out:
page = NULL; page = NULL;
...@@ -2322,11 +2325,13 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) ...@@ -2322,11 +2325,13 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
if (ksm_test_exit(mm)) if (ksm_test_exit(mm))
break; break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET); *page = follow_page(vma, ksm_scan.address, FOLL_GET);
if (IS_ERR_OR_NULL(*page) || is_zone_device_page(*page)) { if (IS_ERR_OR_NULL(*page)) {
ksm_scan.address += PAGE_SIZE; ksm_scan.address += PAGE_SIZE;
cond_resched(); cond_resched();
continue; continue;
} }
if (is_zone_device_page(*page))
goto next_page;
if (PageAnon(*page)) { if (PageAnon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address); flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page); flush_dcache_page(*page);
...@@ -2341,6 +2346,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) ...@@ -2341,6 +2346,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
mmap_read_unlock(mm); mmap_read_unlock(mm);
return rmap_item; return rmap_item;
} }
next_page:
put_page(*page); put_page(*page);
ksm_scan.address += PAGE_SIZE; ksm_scan.address += PAGE_SIZE;
cond_resched(); cond_resched();
......
...@@ -1691,9 +1691,12 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, ...@@ -1691,9 +1691,12 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
goto out; goto out;
err = -ENOENT; err = -ENOENT;
if (!page || is_zone_device_page(page)) if (!page)
goto out; goto out;
if (is_zone_device_page(page))
goto out_putpage;
err = 0; err = 0;
if (page_to_nid(page) == node) if (page_to_nid(page) == node)
goto out_putpage; goto out_putpage;
...@@ -1891,13 +1894,15 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, ...@@ -1891,13 +1894,15 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
if (IS_ERR(page)) if (IS_ERR(page))
goto set_status; goto set_status;
if (page && !is_zone_device_page(page)) { err = -ENOENT;
if (!page)
goto set_status;
if (!is_zone_device_page(page))
err = page_to_nid(page); err = page_to_nid(page);
if (foll_flags & FOLL_GET) if (foll_flags & FOLL_GET)
put_page(page); put_page(page);
} else {
err = -ENOENT;
}
set_status: set_status:
*status = err; *status = err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment