Commit 530dd992 authored by Kefeng Wang, committed by Andrew Morton

mm: memory: improve copy_user_large_folio()

Use nr_pages instead of pages_per_huge_page and move the address alignment
from copy_user_large_folio() into the callers since it is only needed when
we don't know which address will be accessed.
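
For illustration only (not part of the commit): a stand-alone user-space sketch showing that the caller-side ALIGN_DOWN(addr, sz) produces the same folio base address that copy_user_large_folio() previously computed internally by masking addr_hint. ALIGN_DOWN is simplified here to the power-of-two case, and the page-size and address values are made up for the example.

/*
 * Illustrative user-space program, not kernel code: compare the folio
 * base address the old in-function mask computed with the new
 * caller-side ALIGN_DOWN().  ALIGN_DOWN is simplified for power-of-two
 * sizes; the values below are arbitrary.
 */
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long page_shift = 12;			/* 4 KiB base pages */
	unsigned long nr_pages = 512;			/* 2 MiB huge page */
	unsigned long sz = nr_pages << page_shift;	/* huge page size in bytes */
	unsigned long addr_hint = 0x7f0000123456UL;	/* fault address inside the folio */

	/* Old: copy_user_large_folio() masked the hint itself. */
	unsigned long old_base = addr_hint & ~((nr_pages << page_shift) - 1);

	/* New: callers pass an already aligned address when one is needed. */
	unsigned long new_base = ALIGN_DOWN(addr_hint, sz);

	printf("old base %#lx, new base %#lx, equal: %d\n",
	       old_base, new_base, old_base == new_base);
	return 0;
}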

Link: https://lkml.kernel.org/r/20240618091242.2140164-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5132633e
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5492,9 +5492,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				ret = PTR_ERR(new_folio);
 				break;
 			}
-			ret = copy_user_large_folio(new_folio,
-						    pte_folio,
-						    addr, dst_vma);
+			ret = copy_user_large_folio(new_folio, pte_folio,
+						    ALIGN_DOWN(addr, sz), dst_vma);
 			folio_put(pte_folio);
 			if (ret) {
 				folio_put(new_folio);
@@ -6684,7 +6683,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	struct hstate *h = hstate_vma(dst_vma);
 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
-	unsigned long size;
+	unsigned long size = huge_page_size(h);
 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
 	pte_t _dst_pte;
 	spinlock_t *ptl;
@@ -6703,8 +6702,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 		}
 
 		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
-		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
-				huge_page_size(h));
+		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
 		/* No need to invalidate - it was non-present before */
 		update_mmu_cache(dst_vma, dst_addr, dst_pte);
@@ -6778,7 +6776,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+		ret = copy_user_large_folio(folio, *foliop,
+					    ALIGN_DOWN(dst_addr, size), dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
@@ -6805,9 +6804,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 
 	/* Add shared, newly allocated pages to the page cache. */
 	if (vm_shared && !is_continue) {
-		size = i_size_read(mapping->host) >> huge_page_shift(h);
 		ret = -EFAULT;
-		if (idx >= size)
+		if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
 			goto out_release_nounlock;
 
 		/*
@@ -6864,7 +6862,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	if (wp_enabled)
 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
 
-	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
+	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6521,20 +6521,17 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
 int copy_user_large_folio(struct folio *dst, struct folio *src,
 			  unsigned long addr_hint, struct vm_area_struct *vma)
 {
-	unsigned int pages_per_huge_page = folio_nr_pages(dst);
-	unsigned long addr = addr_hint &
-		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+	unsigned int nr_pages = folio_nr_pages(dst);
 	struct copy_subpage_arg arg = {
 		.dst = dst,
 		.src = src,
 		.vma = vma,
 	};
 
-	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
-		return copy_user_gigantic_page(dst, src, addr, vma,
-					       pages_per_huge_page);
+	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
 
-	return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
 }
 
 long copy_folio_from_user(struct folio *dst_folio,