Commit c054a78c authored by Yu Zhao, committed by Linus Torvalds

memcg: refactor mem_cgroup_resize_limit()

mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit() have
identical logic.  Refactor the code so we don't need to keep two copies
that do the same thing.

Link: http://lkml.kernel.org/r/20180108224238.14583-1-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9c3760eb
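
The refactor collapses two near-identical resize helpers into a single function that takes a bool selecting which page counter (memory vs. memory+swap) to resize, and checks the memory.limit <= memsw.limit invariant in one place. The sketch below is a minimal standalone illustration of that pattern with simplified, hypothetical types and names (struct group, resize_limit); it is not the kernel code and omits the reclaim-retry loop, locking, and OOM recovery shown in the diff.

/* Minimal sketch of the pattern, not the kernel code: hypothetical names. */
#include <stdbool.h>
#include <stdio.h>

struct counter { unsigned long usage, limit; };
struct group   { struct counter memory, memsw; };

/*
 * One resize helper instead of two: 'memsw' picks the counter to change
 * and which side of the memory.limit <= memsw.limit invariant to check.
 */
static int resize_limit(struct group *g, unsigned long limit, bool memsw)
{
	struct counter *c = memsw ? &g->memsw : &g->memory;
	bool invariant_ok = memsw ? limit >= g->memory.limit
				  : limit <= g->memsw.limit;

	if (!invariant_ok)
		return -1;	/* the kernel returns -EINVAL here */

	c->limit = limit;	/* the kernel also retries reclaim if the counter is over the new limit */
	return 0;
}

int main(void)
{
	struct group g = { .memory = { 0, 100 }, .memsw = { 0, 200 } };

	printf("%d\n", resize_limit(&g, 250, false)); /* -1: memory limit would exceed memsw limit */
	printf("%d\n", resize_limit(&g, 80,  false)); /*  0: memory limit ok */
	printf("%d\n", resize_limit(&g, 150, true));  /*  0: memsw limit ok */
	printf("%d\n", resize_limit(&g, 50,  true));  /* -1: memsw limit would drop below memory limit */
	return 0;
}

Folding the direction into a flag keeps the invariant check and the retry logic in one place, so the two paths cannot drift apart as separate copies.
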
@@ -2461,13 +2461,15 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-				   unsigned long limit)
+				   unsigned long limit, bool memsw)
 {
 	unsigned long curusage;
 	unsigned long oldusage;
 	bool enlarge = false;
 	int retry_count;
 	int ret;
+	bool limits_invariant;
+	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
 
 	/*
 	 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2477,7 +2479,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);
 
-	oldusage = page_counter_read(&memcg->memory);
+	oldusage = page_counter_read(counter);
 
 	do {
 		if (signal_pending(current)) {
@@ -2486,73 +2488,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 		}
 
 		mutex_lock(&memcg_limit_mutex);
-		if (limit > memcg->memsw.limit) {
-			mutex_unlock(&memcg_limit_mutex);
-			ret = -EINVAL;
-			break;
-		}
-		if (limit > memcg->memory.limit)
-			enlarge = true;
-		ret = page_counter_limit(&memcg->memory, limit);
-		mutex_unlock(&memcg_limit_mutex);
-
-		if (!ret)
-			break;
-
-		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-		curusage = page_counter_read(&memcg->memory);
-		/* Usage is reduced ? */
-		if (curusage >= oldusage)
-			retry_count--;
-		else
-			oldusage = curusage;
-	} while (retry_count);
-
-	if (!ret && enlarge)
-		memcg_oom_recover(memcg);
-
-	return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-					 unsigned long limit)
-{
-	unsigned long curusage;
-	unsigned long oldusage;
-	bool enlarge = false;
-	int retry_count;
-	int ret;
-
-	/* see mem_cgroup_resize_res_limit */
-	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
-		      mem_cgroup_count_children(memcg);
-
-	oldusage = page_counter_read(&memcg->memsw);
-
-	do {
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		mutex_lock(&memcg_limit_mutex);
-		if (limit < memcg->memory.limit) {
+		/*
+		 * Make sure that the new limit (memsw or memory limit) doesn't
+		 * break our basic invariant rule memory.limit <= memsw.limit.
+		 */
+		limits_invariant = memsw ? limit >= memcg->memory.limit :
+					   limit <= memcg->memsw.limit;
+		if (!limits_invariant) {
 			mutex_unlock(&memcg_limit_mutex);
 			ret = -EINVAL;
 			break;
 		}
-		if (limit > memcg->memsw.limit)
+		if (limit > counter->limit)
 			enlarge = true;
-		ret = page_counter_limit(&memcg->memsw, limit);
+		ret = page_counter_limit(counter, limit);
 		mutex_unlock(&memcg_limit_mutex);
 
 		if (!ret)
 			break;
 
-		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-		curusage = page_counter_read(&memcg->memsw);
+		curusage = page_counter_read(counter);
 		/* Usage is reduced ? */
 		if (curusage >= oldusage)
 			retry_count--;
@@ -3014,10 +2971,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
 	}
 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
 	case _MEM:
-		ret = mem_cgroup_resize_limit(memcg, nr_pages);
+		ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
 		break;
 	case _MEMSWAP:
-		ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
+		ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
 		break;
 	case _KMEM:
 		ret = memcg_update_kmem_limit(memcg, nr_pages);