Commit e87340ca authored by ZhangPeng's avatar ZhangPeng Committed by Andrew Morton

userfaultfd: convert copy_huge_page_from_user() to copy_folio_from_user()

Replace copy_huge_page_from_user() with copy_folio_from_user(). 
copy_folio_from_user() does the same as copy_huge_page_from_user(), but
takes in a folio instead of a page.

Convert page_kaddr to kaddr in copy_folio_from_user() to do indenting
cleanup.

Link: https://lkml.kernel.org/r/20230410133932.32288-4-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d508c1f
...@@ -3681,10 +3681,9 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, ...@@ -3681,10 +3681,9 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr_hint, unsigned long addr_hint,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned int pages_per_huge_page); unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page, long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src, const void __user *usr_src,
unsigned int pages_per_huge_page, bool allow_pagefault);
bool allow_pagefault);
/** /**
* vma_is_special_huge - Are transhuge page-table entries considered special? * vma_is_special_huge - Are transhuge page-table entries considered special?
......
...@@ -6217,9 +6217,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, ...@@ -6217,9 +6217,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
goto out; goto out;
} }
ret = copy_huge_page_from_user(&folio->page, ret = copy_folio_from_user(folio, (const void __user *) src_addr,
(const void __user *) src_addr, false);
pages_per_huge_page(h), false);
/* fallback to copy_from_user outside mmap_lock */ /* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) { if (unlikely(ret)) {
......
...@@ -5868,26 +5868,25 @@ void copy_user_huge_page(struct page *dst, struct page *src, ...@@ -5868,26 +5868,25 @@ void copy_user_huge_page(struct page *dst, struct page *src,
process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg); process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
} }
long copy_huge_page_from_user(struct page *dst_page, long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src, const void __user *usr_src,
unsigned int pages_per_huge_page, bool allow_pagefault)
bool allow_pagefault)
{ {
void *page_kaddr; void *kaddr;
unsigned long i, rc = 0; unsigned long i, rc = 0;
unsigned long ret_val = pages_per_huge_page * PAGE_SIZE; unsigned int nr_pages = folio_nr_pages(dst_folio);
unsigned long ret_val = nr_pages * PAGE_SIZE;
struct page *subpage; struct page *subpage;
for (i = 0; i < pages_per_huge_page; i++) { for (i = 0; i < nr_pages; i++) {
subpage = nth_page(dst_page, i); subpage = folio_page(dst_folio, i);
page_kaddr = kmap_local_page(subpage); kaddr = kmap_local_page(subpage);
if (!allow_pagefault) if (!allow_pagefault)
pagefault_disable(); pagefault_disable();
rc = copy_from_user(page_kaddr, rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
usr_src + i * PAGE_SIZE, PAGE_SIZE);
if (!allow_pagefault) if (!allow_pagefault)
pagefault_enable(); pagefault_enable();
kunmap_local(page_kaddr); kunmap_local(kaddr);
ret_val -= (PAGE_SIZE - rc); ret_val -= (PAGE_SIZE - rc);
if (rc) if (rc)
......
...@@ -421,10 +421,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb( ...@@ -421,10 +421,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
mmap_read_unlock(dst_mm); mmap_read_unlock(dst_mm);
BUG_ON(!page); BUG_ON(!page);
err = copy_huge_page_from_user(page, err = copy_folio_from_user(page_folio(page),
(const void __user *)src_addr, (const void __user *)src_addr, true);
vma_hpagesize / PAGE_SIZE,
true);
if (unlikely(err)) { if (unlikely(err)) {
err = -EFAULT; err = -EFAULT;
goto out; goto out;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment