Commit 294de6d8 authored by Kefeng Wang, committed by Andrew Morton

mm: memory: rename page_copy_prealloc() to folio_prealloc()

Let's rename page_copy_prealloc() to folio_prealloc() so it can be reused
by more functions. Since some callers may need the new page zeroed, pass a
new need_zero parameter and call vma_alloc_zeroed_movable_folio() when
need_zero is true.

Link: https://lkml.kernel.org/r/20231118023232.1409103-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f8b6187d
@@ -992,12 +992,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	return 0;
 }
 
-static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
-		struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
+		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
 {
 	struct folio *new_folio;
 
-	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (need_zero)
+		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
+	else
+		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+					    addr, false);
 
 	if (!new_folio)
 		return NULL;
@@ -1129,7 +1134,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		} else if (ret == -EBUSY) {
 			goto out;
 		} else if (ret == -EAGAIN) {
-			prealloc = page_copy_prealloc(src_mm, src_vma, addr);
+			prealloc = folio_prealloc(src_mm, src_vma, addr, false);
 			if (!prealloc)
 				return -ENOMEM;
 		} else if (ret) {
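
For illustration, a minimal sketch of how a later caller might use the new
need_zero parameter. This is hypothetical code, not part of this patch:
folio_prealloc() is still static to mm/memory.c at this point, and
example_alloc_zeroed_folio() is an invented name used only for the example.

/*
 * Hypothetical caller sketch, assumed to live in mm/memory.c next to
 * folio_prealloc(): a path that wants a zeroed destination folio can
 * reuse the helper instead of open-coding the allocation choice.
 */
static vm_fault_t example_alloc_zeroed_folio(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;

	/* need_zero == true selects vma_alloc_zeroed_movable_folio() */
	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, true);
	if (!folio)
		return VM_FAULT_OOM;

	/* ... use the folio here; drop the reference when done ... */
	folio_put(folio);
	return 0;
}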