Commit f64c3f54 authored by Balbir Singh, committed by Linus Torvalds

memory controller: soft limit organize cgroups

Organize cgroups over their soft limit in an RB-Tree

Introduce an RB-Tree for storing memory cgroups that are over their soft
limit.  The overall goal is to

1. Add a memory cgroup to the RB-Tree when its soft limit is exceeded.
   We are careful about updates: an update takes place only after a
   particular time interval has passed.
2. Remove the node from the RB-Tree when usage goes back below the soft
   limit.

The next set of patches will use the RB-Tree to find the group that is
over its soft limit by the largest amount and reclaim from it when we
face memory contention.
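
For orientation, here is a minimal sketch of the data structure those
later patches build, written against the kernel's <linux/rbtree.h> API.
The identifiers (soft_limit_node, soft_limit_tree_insert, and so on)
are illustrative only, not the names the real patches use.

#include <linux/rbtree.h>
#include <linux/spinlock.h>

/*
 * Illustrative only: one node per memory cgroup that is over its
 * soft limit, keyed by how far over the limit it is.
 */
struct soft_limit_node {
	struct rb_node rb_node;
	unsigned long excess;		/* usage - soft_limit, in pages */
};

static struct rb_root soft_limit_tree = RB_ROOT;
static DEFINE_SPINLOCK(soft_limit_lock);

/*
 * Insert @node so that an in-order walk visits groups in increasing
 * order of excess; the rightmost node is then the worst offender and
 * the natural first target for soft-limit reclaim.
 */
static void soft_limit_tree_insert(struct soft_limit_node *node)
{
	struct rb_node **p = &soft_limit_tree.rb_node;
	struct rb_node *parent = NULL;
	struct soft_limit_node *entry;

	spin_lock(&soft_limit_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct soft_limit_node, rb_node);
		if (node->excess < entry->excess)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &soft_limit_tree);
	spin_unlock(&soft_limit_lock);
}

/* Remove @node once its usage drops back under the soft limit. */
static void soft_limit_tree_remove(struct soft_limit_node *node)
{
	spin_lock(&soft_limit_lock);
	rb_erase(&node->rb_node, &soft_limit_tree);
	spin_unlock(&soft_limit_lock);
}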

[hugh.dickins@tiscali.co.uk: CONFIG_CGROUP_MEM_RES_CTLR=y CONFIG_PREEMPT=y fails to boot]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 296c81d8
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -114,7 +114,8 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 int __must_check res_counter_charge_locked(struct res_counter *counter,
 		unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-		unsigned long val, struct res_counter **limit_fail_at);
+		unsigned long val, struct res_counter **limit_fail_at,
+		struct res_counter **soft_limit_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -127,7 +128,8 @@ int __must_check res_counter_charge(struct res_counter *counter,
  */
 
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+				bool *was_soft_limit_excess);
 
 static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 {
...
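
The collapsed portion of this header (the "..." above) also adds the
soft-limit helpers that the kernel/res_counter.c hunks below call.
Modeled on the existing res_counter_limit_check_locked(), the check
plausibly reads as follows; it returns true while usage is still within
the soft limit, so its negation in the hunks below means "over the soft
limit":

static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->soft_limit)
		return true;

	return false;
}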
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -37,17 +37,27 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
 }
 
 int res_counter_charge(struct res_counter *counter, unsigned long val,
-			struct res_counter **limit_fail_at)
+			struct res_counter **limit_fail_at,
+			struct res_counter **soft_limit_fail_at)
 {
 	int ret;
 	unsigned long flags;
 	struct res_counter *c, *u;
 
 	*limit_fail_at = NULL;
+	if (soft_limit_fail_at)
+		*soft_limit_fail_at = NULL;
 	local_irq_save(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
 		ret = res_counter_charge_locked(c, val);
+		/*
+		 * With soft limits, we return the highest ancestor
+		 * that exceeds its soft limit
+		 */
+		if (soft_limit_fail_at &&
+			!res_counter_soft_limit_check_locked(c))
+			*soft_limit_fail_at = c;
 		spin_unlock(&c->lock);
 		if (ret < 0) {
 			*limit_fail_at = c;
@@ -75,7 +85,8 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 	counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+				bool *was_soft_limit_excess)
 {
 	unsigned long flags;
 	struct res_counter *c;
@@ -83,6 +94,9 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 	local_irq_save(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
+		if (was_soft_limit_excess)
+			*was_soft_limit_excess =
+				!res_counter_soft_limit_check_locked(c);
 		res_counter_uncharge_locked(c, val);
 		spin_unlock(&c->lock);
 	}
...
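
Putting the new out-parameters together, a hypothetical caller (the
identifiers below are illustrative; the real consumer is the memory
controller code in the collapsed remainder of this diff) might look
roughly like this:

/* Sketch only: charge one page and note the soft-limit state. */
static int example_charge_page(struct res_counter *cnt)
{
	struct res_counter *fail_res, *soft_fail_res;
	bool was_soft_excess;
	int ret;

	ret = res_counter_charge(cnt, PAGE_SIZE, &fail_res, &soft_fail_res);
	if (ret < 0)
		return ret;	/* hard limit hit at fail_res */

	if (soft_fail_res) {
		/*
		 * Highest ancestor over its soft limit: a candidate
		 * for insertion into the soft-limit RB-Tree.
		 */
	}

	/* ... later, when the charge is released ... */
	res_counter_uncharge(cnt, PAGE_SIZE, &was_soft_excess);
	if (was_soft_excess) {
		/*
		 * The counter was over its soft limit before this
		 * uncharge; recheck whether the group can now leave
		 * the RB-Tree.
		 */
	}
	return 0;
}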