Commit d4ab0316 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios

Continue to use a folio inside free_huge_page() by converting
hugetlb_cgroup_uncharge_page*() to folios.

Link: https://lkml.kernel.org/r/20221101223059.460937-8-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0356c4b9
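For orientation before the diff, a minimal caller-side sketch (illustrative only, not part of this patch) of the calling convention the conversion introduces: a caller that used to hand a struct page to hugetlb_cgroup_uncharge_page() now derives the folio once with page_folio() and passes it to hugetlb_cgroup_uncharge_folio(). The wrapper name example_uncharge below is hypothetical.

	/* Illustrative sketch only -- the wrapper name is hypothetical. */
	static void example_uncharge(struct hstate *h, struct page *page)
	{
		struct folio *folio = page_folio(page);

		/*
		 * Previously:
		 *	hugetlb_cgroup_uncharge_page(hstate_index(h),
		 *				     pages_per_huge_page(h), page);
		 */
		hugetlb_cgroup_uncharge_folio(hstate_index(h),
					      pages_per_huge_page(h), folio);
	}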
include/linux/hugetlb_cgroup.h

@@ -158,10 +158,10 @@ extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 					       struct hugetlb_cgroup *h_cg,
 					       struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					 struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-					       struct page *page);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					  struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+						struct folio *folio);
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
 					   struct hugetlb_cgroup *h_cg);
@@ -254,14 +254,14 @@ hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-						struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+						 struct folio *folio)
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
-						      unsigned long nr_pages,
-						      struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
+						       unsigned long nr_pages,
+						       struct folio *folio)
 {
 }
 
 static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
mm/hugetlb.c

@@ -1742,10 +1742,10 @@ void free_huge_page(struct page *page)
 
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	folio_clear_hugetlb_migratable(folio);
-	hugetlb_cgroup_uncharge_page(hstate_index(h),
-				     pages_per_huge_page(h), page);
-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					  pages_per_huge_page(h), page);
+	hugetlb_cgroup_uncharge_folio(hstate_index(h),
+				      pages_per_huge_page(h), folio);
+	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					   pages_per_huge_page(h), folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
@@ -2872,6 +2872,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
+	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
 	int ret, idx;
@@ -2935,6 +2936,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * a reservation exists for the allocation.
 	 */
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
+
 	if (!page) {
 		spin_unlock_irq(&hugetlb_lock);
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
@@ -2949,6 +2951,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		set_page_refcounted(page);
 		/* Fall through */
 	}
+	folio = page_folio(page);
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
@@ -2978,8 +2981,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
 		hugetlb_acct_memory(h, -rsv_adjust);
 		if (deferred_reserve)
-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					pages_per_huge_page(h), page);
+			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					pages_per_huge_page(h), folio);
 	}
 	return page;
mm/hugetlb_cgroup.c

@@ -346,11 +346,10 @@ void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 /*
  * Should be called with hugetlb_lock held
  */
-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					   struct page *page, bool rsvd)
+static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					    struct folio *folio, bool rsvd)
 {
 	struct hugetlb_cgroup *h_cg;
-	struct folio *folio = page_folio(page);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -368,27 +367,27 @@ static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 		css_put(&h_cg->css);
 	else {
 		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
 		/*
 		 * This write is not atomic due to fetching usage and writing
 		 * to it, but that's fine because we call this with
 		 * hugetlb_lock held anyway.
 		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
 			   usage - nr_pages);
 	}
 }
 
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-				  struct page *page)
+void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+				   struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
 }
 
-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-				       struct page *page)
+void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
 }
 
 static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,