Commit 6d76dcf4 authored by Aneesh Kumar K.V, committed by Linus Torvalds

hugetlb/cgroup: add charge/uncharge routines for hugetlb cgroup

Add the charge and uncharge routines for the hugetlb cgroup.  Cgroup
charging is done at page allocation time and uncharging in the compound
page destructor.  Assigning a page's hugetlb cgroup is protected by
hugetlb_lock.

[liwp@linux.vnet.ibm.com: add huge_page_order check to avoid incorrect uncharge]
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Wanpeng Li <liwp.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9dd540e2
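
The diff below follows a two-phase protocol: charge the cgroup first, commit the charge to the page once allocation succeeds (under hugetlb_lock), and uncharge either on allocation failure or when the page is freed. The following user-space sketch only illustrates that pattern and is not kernel code; fake_cgroup, fake_page, charge/commit/uncharge and the counter fields are hypothetical stand-ins for hugetlb_cgroup, struct page, the hugetlb_cgroup_* routines and res_counter.

/*
 * Illustrative sketch only: mimics the charge -> commit / uncharge
 * flow added by this commit, using toy types in place of the real ones.
 */
#include <stdio.h>

struct fake_cgroup {
	unsigned long usage;	/* pages currently charged */
	unsigned long limit;	/* charge limit in pages */
};

struct fake_page {
	struct fake_cgroup *cg;	/* set only once the charge is committed */
};

/* Phase 1: reserve the charge against the cgroup counter (may fail). */
static int charge(struct fake_cgroup *cg, unsigned long nr_pages)
{
	if (cg->usage + nr_pages > cg->limit)
		return -1;
	cg->usage += nr_pages;
	return 0;
}

/*
 * Phase 2: bind the committed charge to the page; the kernel does this
 * under hugetlb_lock.
 */
static void commit(struct fake_page *page, struct fake_cgroup *cg)
{
	page->cg = cg;
}

/*
 * Undo a charge: used both when allocation fails after a successful
 * charge and when the page is finally freed.
 */
static void uncharge(struct fake_cgroup *cg, unsigned long nr_pages)
{
	cg->usage -= nr_pages;
}

int main(void)
{
	struct fake_cgroup cg = { .usage = 0, .limit = 4 };
	struct fake_page page = { .cg = NULL };
	unsigned long nr = 2;		/* stands in for pages_per_huge_page() */
	int alloc_failed = 0;		/* pretend allocation outcome */

	if (charge(&cg, nr))		/* ~ hugetlb_cgroup_charge_cgroup() */
		return 1;

	if (alloc_failed) {
		uncharge(&cg, nr);	/* ~ hugetlb_cgroup_uncharge_cgroup() */
		return 1;
	}

	commit(&page, &cg);		/* ~ hugetlb_cgroup_commit_charge() */
	printf("usage after commit: %lu pages\n", cg.usage);

	/* Freeing the page later uncharges via the compound destructor path. */
	uncharge(&cg, nr);		/* ~ hugetlb_cgroup_uncharge_page() */
	page.cg = NULL;
	printf("usage after free: %lu pages\n", cg.usage);
	return 0;
}

The split into charge and commit lets the allocation path reserve against the limit before taking hugetlb_lock, and back the charge out cleanly if no page can be obtained.
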
include/linux/hugetlb_cgroup.h
@@ -53,6 +53,16 @@ static inline bool hugetlb_cgroup_disabled(void)
return false;
}
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
struct page *page);
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
#else
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
@@ -70,5 +80,33 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
static inline int
hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
return 0;
}
static inline void
hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page)
{
return;
}
static inline void
hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
{
return;
}
static inline void
hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg)
{
return;
}
#endif /* CONFIG_MEM_RES_CTLR_HUGETLB */
#endif
mm/hugetlb.c
@@ -627,6 +627,8 @@ static void free_huge_page(struct page *page)
BUG_ON(page_mapcount(page));
spin_lock(&hugetlb_lock);
hugetlb_cgroup_uncharge_page(hstate_index(h),
pages_per_huge_page(h), page);
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
/* remove the page from active list */
list_del(&page->lru);
@@ -1115,7 +1117,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
struct hstate *h = hstate_vma(vma);
struct page *page;
long chg;
int ret, idx;
struct hugetlb_cgroup *h_cg;
idx = hstate_index(h);
/*
 * Processes that did not create the mapping will have no
 * reserves and will not have accounted against subpool
@@ -1131,6 +1136,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
if (hugepage_subpool_get_pages(spool, chg))
return ERR_PTR(-ENOSPC);
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
if (ret) {
hugepage_subpool_put_pages(spool, chg);
return ERR_PTR(-ENOSPC);
}
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
spin_unlock(&hugetlb_lock);
@@ -1138,6 +1148,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
if (!page) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
hugetlb_cgroup_uncharge_cgroup(idx,
pages_per_huge_page(h),
h_cg);
hugepage_subpool_put_pages(spool, chg);
return ERR_PTR(-ENOSPC);
}
@@ -1146,7 +1159,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
set_page_private(page, (unsigned long)spool);
vma_commit_reservation(h, vma, addr);
/* update page cgroup details */
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
return page;
}
mm/hugetlb_cgroup.c
@@ -111,6 +111,86 @@ static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
return -EBUSY;
}
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
int ret = 0;
struct res_counter *fail_res;
struct hugetlb_cgroup *h_cg = NULL;
unsigned long csize = nr_pages * PAGE_SIZE;
if (hugetlb_cgroup_disabled())
goto done;
/*
* We don't charge any cgroup if the compound page has fewer
* than 3 pages.
*/
if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
goto done;
again:
rcu_read_lock();
h_cg = hugetlb_cgroup_from_task(current);
if (!css_tryget(&h_cg->css)) {
rcu_read_unlock();
goto again;
}
rcu_read_unlock();
ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
css_put(&h_cg->css);
done:
*ptr = h_cg;
return ret;
}
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page)
{
if (hugetlb_cgroup_disabled() || !h_cg)
return;
spin_lock(&hugetlb_lock);
set_hugetlb_cgroup(page, h_cg);
spin_unlock(&hugetlb_lock);
return;
}
/*
* Should be called with hugetlb_lock held
*/
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
struct page *page)
{
struct hugetlb_cgroup *h_cg;
unsigned long csize = nr_pages * PAGE_SIZE;
if (hugetlb_cgroup_disabled())
return;
VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
h_cg = hugetlb_cgroup_from_page(page);
if (unlikely(!h_cg))
return;
set_hugetlb_cgroup(page, NULL);
res_counter_uncharge(&h_cg->hugepage[idx], csize);
return;
}
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg)
{
unsigned long csize = nr_pages * PAGE_SIZE;
if (hugetlb_cgroup_disabled() || !h_cg)
return;
if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
return;
res_counter_uncharge(&h_cg->hugepage[idx], csize);
return;
}
struct cgroup_subsys hugetlb_subsys = {
.name = "hugetlb",
.create = hugetlb_cgroup_create,