Commit e87340ca authored by ZhangPeng, committed by Andrew Morton

userfaultfd: convert copy_huge_page_from_user() to copy_folio_from_user()

Replace copy_huge_page_from_user() with copy_folio_from_user().
copy_folio_from_user() does the same as copy_huge_page_from_user(), but
takes a folio instead of a page and derives the page count from the
folio via folio_nr_pages(), so the pages_per_huge_page argument is
dropped.

Also rename page_kaddr to kaddr in copy_folio_from_user(); the shorter
name cleans up the indentation of the copy_from_user() call.
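
To illustrate the caller-side change, here is a condensed before/after
sketch taken from the hugetlb_mfill_atomic_pte() hunk below (not new
code beyond what the diff itself shows):

	/* Before: pass a raw page plus an explicit page count. */
	ret = copy_huge_page_from_user(&folio->page,
				       (const void __user *) src_addr,
				       pages_per_huge_page(h), false);

	/* After: pass the folio; copy_folio_from_user() derives the
	 * page count internally via folio_nr_pages().
	 */
	ret = copy_folio_from_user(folio, (const void __user *) src_addr,
				   false);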

Link: https://lkml.kernel.org/r/20230410133932.32288-4-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d508c1f
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3681,9 +3681,8 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned long addr_hint,
 				struct vm_area_struct *vma,
 				unsigned int pages_per_huge_page);
-extern long copy_huge_page_from_user(struct page *dst_page,
-				const void __user *usr_src,
-				unsigned int pages_per_huge_page,
-				bool allow_pagefault);
+long copy_folio_from_user(struct folio *dst_folio,
+			  const void __user *usr_src,
+			  bool allow_pagefault);
 
 /**
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6217,9 +6217,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			goto out;
 		}
 
-		ret = copy_huge_page_from_user(&folio->page,
-						(const void __user *) src_addr,
-						pages_per_huge_page(h), false);
+		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
+					   false);
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5868,26 +5868,25 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
-long copy_huge_page_from_user(struct page *dst_page,
-			      const void __user *usr_src,
-			      unsigned int pages_per_huge_page,
-			      bool allow_pagefault)
+long copy_folio_from_user(struct folio *dst_folio,
+			  const void __user *usr_src,
+			  bool allow_pagefault)
 {
-	void *page_kaddr;
+	void *kaddr;
 	unsigned long i, rc = 0;
-	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+	unsigned int nr_pages = folio_nr_pages(dst_folio);
+	unsigned long ret_val = nr_pages * PAGE_SIZE;
 	struct page *subpage;
 
-	for (i = 0; i < pages_per_huge_page; i++) {
-		subpage = nth_page(dst_page, i);
-		page_kaddr = kmap_local_page(subpage);
+	for (i = 0; i < nr_pages; i++) {
+		subpage = folio_page(dst_folio, i);
+		kaddr = kmap_local_page(subpage);
 
 		if (!allow_pagefault)
 			pagefault_disable();
-		rc = copy_from_user(page_kaddr,
-				    usr_src + i * PAGE_SIZE, PAGE_SIZE);
+		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
 		if (!allow_pagefault)
 			pagefault_enable();
-		kunmap_local(page_kaddr);
+		kunmap_local(kaddr);
 
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -421,10 +421,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 			mmap_read_unlock(dst_mm);
 			BUG_ON(!page);
 
-			err = copy_huge_page_from_user(page,
-						(const void __user *)src_addr,
-						vma_hpagesize / PAGE_SIZE,
-						true);
+			err = copy_folio_from_user(page_folio(page),
+						   (const void __user *)src_addr, true);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
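
The return contract is unchanged by the conversion: the helper returns
the number of bytes left uncopied, so zero means the whole folio was
filled. A minimal caller sketch (folio and src_addr are illustrative
names mirroring the hunks above):

	long left;

	left = copy_folio_from_user(folio, (const void __user *) src_addr,
				    true /* allow_pagefault */);
	if (left)	/* partial copy: callers above map this to -EFAULT */
		return -EFAULT;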