Commit cfe3236d authored by Kefeng Wang, committed by Andrew Morton

mm: huge_memory: convert __do_huge_pmd_anonymous_page() to use a folio

Patch series "mm: remove cgroup_throttle_swaprate() completely", v2.

Convert all the caller functions of cgroup_throttle_swaprate() to use
folios, and use folio_throttle_swaprate(), which allows us to remove
cgroup_throttle_swaprate() completely.
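
For illustration, a minimal sketch (not a patch from this series; the
helper name charge_and_throttle is made up) of the call-site pattern the
series applies: resolve the folio once with page_folio(), use the
folio-based APIs from then on, and call folio_throttle_swaprate() where
cgroup_throttle_swaprate() used to be:

static vm_fault_t charge_and_throttle(struct page *page,
				      struct vm_area_struct *vma, gfp_t gfp)
{
	struct folio *folio = page_folio(page);	/* folio backing @page */

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);		/* was put_page(page) */
		return VM_FAULT_FALLBACK;
	}
	/* was cgroup_throttle_swaprate(page, gfp) */
	folio_throttle_swaprate(folio, gfp);
	return 0;
}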


This patch (of 7):

Convert from page to folio within __do_huge_pmd_anonymous_page().  Since
we need the precise page in the folio which is to be stored at this PTE,
the function still keeps a page as its parameter.
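
As an aside (illustrative, not part of the patch): page_folio() recovers
the folio containing a given page, and folio_page() goes the other way,
so keeping the page parameter loses no information:

	struct folio *folio = page_folio(page);	/* folio containing @page */
	/* the head page round-trips through the folio */
	VM_BUG_ON_PAGE(folio_page(folio, 0) != compound_head(page), page);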

Link: https://lkml.kernel.org/r/20230302115835.105364-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20230302115835.105364-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 16d91faf
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -656,19 +656,20 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 			struct page *page, gfp_t gfp)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio = page_folio(page);
 	pgtable_t pgtable;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	vm_fault_t ret = 0;
 
-	VM_BUG_ON_PAGE(!PageCompound(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
-		put_page(page);
+	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
+		folio_put(folio);
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
 		return VM_FAULT_FALLBACK;
 	}
-	cgroup_throttle_swaprate(page, gfp);
+	folio_throttle_swaprate(folio, gfp);
 
 	pgtable = pte_alloc_one(vma->vm_mm);
 	if (unlikely(!pgtable)) {
@@ -678,11 +679,11 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * clear_huge_page writes become visible before the set_pmd_at()
 	 * write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_none(*vmf->pmd))) {
@@ -697,7 +698,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		/* Deliver the page fault to userland */
 		if (userfaultfd_missing(vma)) {
 			spin_unlock(vmf->ptl);
-			put_page(page);
+			folio_put(folio);
 			pte_free(vma->vm_mm, pgtable);
 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
@@ -706,8 +707,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		page_add_new_anon_rmap(page, vma, haddr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		folio_add_new_anon_rmap(folio, vma, haddr);
+		folio_add_lru_vma(folio, vma);
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
@@ -724,7 +725,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 release:
 	if (pgtable)
 		pte_free(vma->vm_mm, pgtable);
-	put_page(page);
+	folio_put(folio);
 	return ret;
 
 }
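
For readers skimming the diff: the conversion is mechanical, replacing
each page-based call with its folio counterpart operating on the same
memory:

- VM_BUG_ON_PAGE(!PageCompound(page), page) -> VM_BUG_ON_FOLIO(!folio_test_large(folio), folio)
- put_page(page) -> folio_put(folio)
- cgroup_throttle_swaprate(page, gfp) -> folio_throttle_swaprate(folio, gfp)
- __SetPageUptodate(page) -> __folio_mark_uptodate(folio)
- page_add_new_anon_rmap(page, vma, haddr) -> folio_add_new_anon_rmap(folio, vma, haddr)
- lru_cache_add_inactive_or_unevictable(page, vma) -> folio_add_lru_vma(folio, vma)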