Commit 8d32ff84 authored by Naoya Horiguchi, committed by Linus Torvalds

memcg: clean up existing move charge code

- Replace lengthy function name is_target_pte_for_mc() with a shorter
  one in order to avoid ugly line breaks.

- Explicitly use MC_TARGET_* values instead of bare integers.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4884288
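Before the diff, a minimal, self-contained sketch of the pattern the change adopts, for readers who want to see it in isolation. This is illustrative only, not the memcontrol.c code; the classify() helper, its arguments, and main() are hypothetical. The point is that the helper returns a named enum rather than a bare int, and the "no target" value is explicitly 0, so callers can switch on named constants and still use the return value in a plain truth test, exactly as the precharge-counting hunk below does.

#include <stdio.h>

enum mc_target_type {
	MC_TARGET_NONE = 0,	/* explicitly 0: "no target" must test as false */
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

/* Hypothetical stand-in for get_mctgt_type(): classify one entry. */
static enum mc_target_type classify(int has_page, int has_swap)
{
	if (has_page)
		return MC_TARGET_PAGE;
	if (has_swap)
		return MC_TARGET_SWAP;
	return MC_TARGET_NONE;
}

int main(void)
{
	int inputs[3][2] = { { 1, 0 }, { 0, 1 }, { 0, 0 } };
	int precharge = 0;
	int i;

	for (i = 0; i < 3; i++) {
		/* Truth test works because MC_TARGET_NONE == 0. */
		if (classify(inputs[i][0], inputs[i][1]))
			precharge++;
	}
	printf("precharge = %d\n", precharge);	/* prints 2 */
	return 0;
}

Keeping MC_TARGET_NONE at 0 is what lets the mem_cgroup_count_precharge_pte_range() hunk keep its plain if (get_mctgt_type(vma, addr, *pte, NULL)) test after the return type changes from int to the enum.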
@@ -5110,7 +5110,7 @@ static int mem_cgroup_do_precharge(unsigned long count)
 }
 /**
- * is_target_pte_for_mc - check a pte whether it is valid for move charge
+ * get_mctgt_type - get target type of moving charge
  * @vma: the vma the pte to be checked belongs
  * @addr: the address corresponding to the pte to be checked
  * @ptent: the pte to be checked
@@ -5133,7 +5133,7 @@ union mc_target {
 };
 enum mc_target_type {
-	MC_TARGET_NONE,	/* not used */
+	MC_TARGET_NONE = 0,
 	MC_TARGET_PAGE,
 	MC_TARGET_SWAP,
 };
@@ -5214,12 +5214,12 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 	return page;
 }
-static int is_target_pte_for_mc(struct vm_area_struct *vma,
+static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
 		unsigned long addr, pte_t ptent, union mc_target *target)
 {
 	struct page *page = NULL;
 	struct page_cgroup *pc;
-	int ret = 0;
+	enum mc_target_type ret = MC_TARGET_NONE;
 	swp_entry_t ent = { .val = 0 };
 	if (pte_present(ptent))
@@ -5230,7 +5230,7 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
 	if (!page && !ent.val)
-		return 0;
+		return ret;
 	if (page) {
 		pc = lookup_page_cgroup(page);
 		/*
@@ -5270,7 +5270,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
-		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
+		if (get_mctgt_type(vma, addr, *pte, NULL))
 			mc.precharge++;	/* increment precharge temporarily */
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
@@ -5442,8 +5442,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 		if (!mc.precharge)
 			break;
-		type = is_target_pte_for_mc(vma, addr, ptent, &target);
-		switch (type) {
+		switch (get_mctgt_type(vma, addr, ptent, &target)) {
 		case MC_TARGET_PAGE:
 			page = target.page;
 			if (isolate_lru_page(page))
@@ -5456,7 +5455,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 				mc.moved_charge++;
 			}
 			putback_lru_page(page);
-put:			/* is_target_pte_for_mc() gets the page */
+put:			/* get_mctgt_type() gets the page */
 			put_page(page);
 			break;
 		case MC_TARGET_SWAP: