Commit 90254a65 authored by Daisuke Nishimura's avatar Daisuke Nishimura Committed by Linus Torvalds

memcg: clean up move charge

This patch cleans up move charge code by:

- define a function to handle the pte for each type, making
  is_target_pte_for_mc() cleaner.

- instead of checking the MOVE_CHARGE_TYPE_ANON bit, define a function
  that checks the bit.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3c11ecf4
...@@ -266,6 +266,12 @@ static struct move_charge_struct { ...@@ -266,6 +266,12 @@ static struct move_charge_struct {
.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
}; };
/*
 * True when the destination memcg has opted in to moving charges of
 * anonymous pages (MOVE_CHARGE_TYPE_ANON bit of move_charge_at_immigrate).
 */
static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.to->move_charge_at_immigrate);
}
/* /*
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur. * limit reclaim to prevent infinite loops, if they ever occur.
...@@ -4162,50 +4168,66 @@ enum mc_target_type { ...@@ -4162,50 +4168,66 @@ enum mc_target_type {
MC_TARGET_SWAP, MC_TARGET_SWAP,
}; };
/*
 * Reconstructed post-patch function: the source lines were a two-column
 * diff paste with the old and new code fused onto the same lines; this is
 * the right-hand ("new") column, reformatted to kernel style.
 *
 * For a present pte, return the candidate page for charge moving with a
 * reference held, or NULL when the page is not movable: not mapped,
 * file-backed (TODO in the code below), shared anon, or anon moving is
 * disabled for the destination memcg.
 */
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon() || page_mapcount(page) > 2)
			return NULL;
	} else
		/*
		 * TODO: We don't move charges of file (including shmem/tmpfs)
		 * pages for now.
		 */
		return NULL;
	/* Pin the page; bail out if it is being freed concurrently. */
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}
if (usage_count > 1) {
/* static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
* TODO: We don't move charges of shared(used by multiple unsigned long addr, pte_t ptent, swp_entry_t *entry)
* processes) pages for now. {
*/ int usage_count;
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
if (!move_anon() || non_swap_entry(ent))
return NULL;
usage_count = mem_cgroup_count_swap_user(ent, &page);
if (usage_count > 1) { /* we don't move shared anon */
if (page) if (page)
put_page(page); put_page(page);
return 0; return NULL;
} }
if (do_swap_account)
entry->val = ent.val;
return page;
}
static int is_target_pte_for_mc(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, union mc_target *target)
{
struct page *page = NULL;
struct page_cgroup *pc;
int ret = 0;
swp_entry_t ent = { .val = 0 };
if (pte_present(ptent))
page = mc_handle_present_pte(vma, addr, ptent);
else if (is_swap_pte(ptent))
page = mc_handle_swap_pte(vma, addr, ptent, &ent);
/* TODO: handle swap of shmes/tmpfs */
if (!page && !ent.val)
return 0;
if (page) { if (page) {
pc = lookup_page_cgroup(page); pc = lookup_page_cgroup(page);
/* /*
...@@ -4221,8 +4243,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma, ...@@ -4221,8 +4243,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
if (!ret || !target) if (!ret || !target)
put_page(page); put_page(page);
} }
/* throught */ /* There is a swap entry and a page doesn't exist or isn't charged */
if (ent.val && do_swap_account && !ret && if (ent.val && !ret &&
css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
ret = MC_TARGET_SWAP; ret = MC_TARGET_SWAP;
if (target) if (target)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment