Commit 32dad03d authored by Linus Torvalds

Merge branch 'for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup updates from Tejun Heo:
 "A lot of activities on the cgroup front.  Most changes aren't visible
  to userland at all at this point and are laying foundation for the
  planned unified hierarchy.

   - The biggest change is decoupling the lifetime management of css
     (cgroup_subsys_state) from that of cgroup's.  Because controllers
     (cpu, memory, block and so on) will need to be dynamically enabled
     and disabled, css which is the association point between a cgroup
     and a controller may come and go dynamically across the lifetime of
     a cgroup.  Till now, css's were created when the associated cgroup
     was created and stayed till the cgroup got destroyed.

     Assumptions around this tight coupling permeated through cgroup
     core and controllers.  These assumptions are gradually removed,
     which consists bulk of patches, and css destruction path is
     completely decoupled from cgroup destruction path.  Note that
     decoupling of creation path is relatively easy on top of these
     changes and the patchset is pending for the next window.

   - cgroup has its own event mechanism cgroup.event_control, which is
     only used by memcg.  It is overly complex trying to achieve high
     flexibility whose benefits seem dubious at best.  Going forward,
     new events will simply generate file modified event and the
     existing mechanism is being made specific to memcg.  This pull
     request contains prepatory patches for such change.

   - Various fixes and cleanups"

Fixed up conflict in kernel/cgroup.c as per Tejun.
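
The css decoupling described in the first bullet above rests on one C idiom that the diff below applies everywhere: each controller embeds a struct cgroup_subsys_state inside its own state object and recovers that object with container_of(), now tolerating a NULL css so that parent lookups compose. Below is a minimal userspace sketch of that pattern; the names my_state, css_model and my_css_to_state are invented for illustration, and none of this is kernel code.

/*
 * Minimal userspace sketch of the css embedding pattern used throughout
 * this merge.  Illustrative only: "my_state" and "my_css_to_state" are
 * made-up names, and css_model merely stands in for the kernel's
 * cgroup_subsys_state.
 */
#include <stddef.h>
#include <stdio.h>

struct css_model {
	struct css_model *parent;	/* NULL at the root, like css_parent() */
};

struct my_state {			/* stands in for blkcg, task_group, ... */
	int weight;
	struct css_model css;		/* embedded, not pointed to */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* NULL-propagating lookup, mirroring the new css_to_blkcg()/css_tg() style */
static struct my_state *my_css_to_state(struct css_model *css)
{
	return css ? container_of(css, struct my_state, css) : NULL;
}

int main(void)
{
	struct my_state root  = { .weight = 100, .css = { .parent = NULL } };
	struct my_state child = { .weight = 50,  .css = { .parent = &root.css } };

	/* callbacks now receive a css and recover controller state from it */
	struct my_state *s = my_css_to_state(&child.css);
	struct my_state *p = my_css_to_state(child.css.parent);

	printf("child weight %d, parent weight %d\n", s->weight, p->weight);
	return 0;			/* child weight 50, parent weight 100 */
}

Because the helper accepts NULL, a root-level lookup such as my_css_to_state(root.css.parent) simply returns NULL instead of requiring a separate parent check, which is why helpers like blkcg_parent() and parent_ca() collapse to one line in the diff below.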

* 'for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (69 commits)
  cgroup: fix cgroup_css() invocation in css_from_id()
  cgroup: make cgroup_write_event_control() use css_from_dir() instead of __d_cgrp()
  cgroup: make cgroup_event hold onto cgroup_subsys_state instead of cgroup
  cgroup: implement CFTYPE_NO_PREFIX
  cgroup: make cgroup_css() take cgroup_subsys * instead and allow NULL subsys
  cgroup: rename cgroup_css_from_dir() to css_from_dir() and update its syntax
  cgroup: fix cgroup_write_event_control()
  cgroup: fix subsystem file accesses on the root cgroup
  cgroup: change cgroup_from_id() to css_from_id()
  cgroup: use css_get() in cgroup_create() to check CSS_ROOT
  cpuset: remove an unncessary forward declaration
  cgroup: RCU protect each cgroup_subsys_state release
  cgroup: move subsys file removal to kill_css()
  cgroup: factor out kill_css()
  cgroup: decouple cgroup_subsys_state destruction from cgroup destruction
  cgroup: replace cgroup->css_kill_cnt with ->nr_css
  cgroup: bounce cgroup_subsys_state ref kill confirmation to a work item
  cgroup: move cgroup->subsys[] assignment to online_css()
  cgroup: reorganize css init / exit paths
  cgroup: add __rcu modifier to cgroup->subsys[]
  ...
parents 357397a1 d1625964
@@ -437,10 +437,10 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 	return &blkg->rl;
 }
 
-static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
-			     u64 val)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+			     struct cftype *cftype, u64 val)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
 	int i;
@@ -614,15 +614,13 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 {
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
-	struct cgroup *pos_cgrp;
-	u64 sum;
+	struct cgroup_subsys_state *pos_css;
+	u64 sum = 0;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_stat_read((void *)pd + off);
-
 	rcu_read_lock();
-	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 		struct blkg_stat *stat = (void *)pos_pd + off;
@@ -649,16 +647,14 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 {
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
-	struct cgroup *pos_cgrp;
-	struct blkg_rwstat sum;
+	struct cgroup_subsys_state *pos_css;
+	struct blkg_rwstat sum = { };
 	int i;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_rwstat_read((void *)pd + off);
-
 	rcu_read_lock();
-	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
 		struct blkg_rwstat tmp;
@@ -765,18 +761,18 @@ struct cftype blkcg_files[] = {
 
 /**
  * blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
  *
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup.  blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css.  blkgs should be
  * removed while holding both q and blkcg locks.  As blkcg lock is nested
  * inside q lock, this function performs reverse double lock dancing.
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	spin_lock_irq(&blkcg->lock);
@@ -798,21 +794,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
 	spin_unlock_irq(&blkcg->lock);
 }
 
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	if (blkcg != &blkcg_root)
 		kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkcg *blkcg;
-	struct cgroup *parent = cgroup->parent;
 
-	if (!parent) {
+	if (!parent_css) {
 		blkcg = &blkcg_root;
 		goto done;
 	}
@@ -883,14 +879,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+			    struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -1127,7 +1124,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
 
 	/* kill the intf files first */
 	if (pol->cftypes)
-		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+		cgroup_rm_cftypes(pol->cftypes);
 
 	/* unregister and update blkgs */
 	blkcg_policy[pol->plid] = NULL;
...
@@ -179,22 +179,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-			    struct blkcg, css);
+	return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-	return container_of(task_subsys_state(tsk, blkio_subsys_id),
-			    struct blkcg, css);
+	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
 }
 
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
 	if (bio && bio->bi_css)
-		return container_of(bio->bi_css, struct blkcg, css);
+		return css_to_blkcg(bio->bi_css);
 	return task_blkcg(current);
 }
 
@@ -206,9 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
  */
 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
 {
-	struct cgroup *pcg = blkcg->css.cgroup->parent;
-
-	return pcg ? cgroup_to_blkcg(pcg) : NULL;
+	return css_to_blkcg(css_parent(&blkcg->css));
 }
 
 /**
@@ -288,32 +284,33 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 /**
  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Walk @c_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
 */
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)		\
-	cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp),	\
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
+	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
+		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
 /**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead.  Synchronization rules are the same.
+ * traversal instead.  Synchronization rules are the same.  @p_blkg is
+ * included in the iteration and the last node to be visited.
 */
-#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg)	\
-	cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp),	\
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
+	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
+		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
 /**
@@ -576,7 +573,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
 					   const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
...
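
The kernel-doc above now states that @p_blkg itself is part of the walk, visited first in pre-order and last in post-order, which is why callers such as blkg_stat_recursive_sum() and tg_set_conf() in this merge drop their separate handling of the starting node. A small userspace model of the two visit orders, assuming a plain recursive tree in place of the RCU-protected css tree, is sketched below; it is illustrative only, not kernel code.

/*
 * Userspace model of the pre/post-order descendant walks documented
 * above.  The kernel iterators walk an RCU-protected css tree under
 * rcu_read_lock(); this sketch uses a plain child array just to show
 * the visit order, including the rule that the starting node is
 * visited first (pre-order) or last (post-order).
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *child[4];			/* NULL-terminated */
};

static void walk_pre(struct node *n)		/* css_for_each_descendant_pre */
{
	printf("%s ", n->name);			/* self first */
	for (int i = 0; n->child[i]; i++)
		walk_pre(n->child[i]);
}

static void walk_post(struct node *n)		/* css_for_each_descendant_post */
{
	for (int i = 0; n->child[i]; i++)
		walk_post(n->child[i]);
	printf("%s ", n->name);			/* self last */
}

int main(void)
{
	struct node b = { "B", { NULL } };
	struct node c = { "C", { NULL } };
	struct node a = { "A", { &b, &c, NULL } };

	walk_pre(&a);				/* prints: A B C */
	printf("\n");
	walk_post(&a);				/* prints: B C A */
	printf("\n");
	return 0;
}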
@@ -1293,10 +1293,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
 
-static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			       struct seq_file *sf)
+static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
+			       struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
 			  cft->private, true);
@@ -1325,31 +1325,31 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
 	return __blkg_prfill_u64(sf, pd, v);
 }
 
-static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-			     struct seq_file *sf)
+static int tg_print_conf_u64(struct cgroup_subsys_state *css,
+			     struct cftype *cft, struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+	blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
 			  &blkcg_policy_throtl, cft->private, false);
 	return 0;
 }
 
-static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
-			      struct seq_file *sf)
+static int tg_print_conf_uint(struct cgroup_subsys_state *css,
+			      struct cftype *cft, struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+	blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
 			  &blkcg_policy_throtl, cft->private, false);
 	return 0;
 }
 
-static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
-		       bool is_u64)
+static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
+		       const char *buf, bool is_u64)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
 	struct throtl_service_queue *sq;
 	struct blkcg_gq *blkg;
-	struct cgroup *pos_cgrp;
+	struct cgroup_subsys_state *pos_css;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1379,8 +1379,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	 * restrictions in the whole hierarchy and allows them to bypass
 	 * blk-throttle.
 	 */
-	tg_update_has_rules(tg);
-	blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
+	blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
 		tg_update_has_rules(blkg_to_tg(blkg));
 
 	/*
@@ -1403,16 +1402,16 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	return 0;
 }
 
-static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 			   const char *buf)
 {
-	return tg_set_conf(cgrp, cft, buf, true);
+	return tg_set_conf(css, cft, buf, true);
 }
 
-static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
 			    const char *buf)
 {
-	return tg_set_conf(cgrp, cft, buf, false);
+	return tg_set_conf(css, cft, buf, false);
 }
 
 static struct cftype throtl_files[] = {
@@ -1623,7 +1622,7 @@ void blk_throtl_drain(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
 	struct blkcg_gq *blkg;
-	struct cgroup *pos_cgrp;
+	struct cgroup_subsys_state *pos_css;
 	struct bio *bio;
 	int rw;
@@ -1636,11 +1635,9 @@ void blk_throtl_drain(struct request_queue *q)
 	 * better to walk service_queue tree directly but blkg walk is
 	 * easier.
 	 */
-	blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
 
-	tg_drain_bios(&td_root_tg(td)->service_queue);
-
 	/* finally, transfer bios from top-level tg's into the td */
 	tg_drain_bios(&td->service_queue);
...
@@ -1607,12 +1607,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
 	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
 }
 
-static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				    struct seq_file *sf)
+static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
+				    struct cftype *cft, struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
-			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
-			  false);
+	blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
+			  &blkcg_policy_cfq, 0, false);
 	return 0;
 }
@@ -1626,35 +1625,34 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
 	return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
 }
 
-static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
 					 struct cftype *cft,
 					 struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
-			  cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
-			  false);
+	blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
+			  &blkcg_policy_cfq, 0, false);
 	return 0;
 }
 
-static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
 			    struct seq_file *sf)
 {
-	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+	seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
 	return 0;
 }
 
-static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
-				 struct seq_file *sf)
+static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
+				 struct cftype *cft, struct seq_file *sf)
 {
-	seq_printf(sf, "%u\n",
-		   cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+	seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
 	return 0;
 }
 
-static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				    const char *buf, bool is_leaf_weight)
+static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
+				    struct cftype *cft, const char *buf,
+				    bool is_leaf_weight)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkg_conf_ctx ctx;
 	struct cfq_group *cfqg;
 	int ret;
@@ -1680,22 +1678,22 @@ static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 	return ret;
 }
 
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				  const char *buf)
+static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
+				  struct cftype *cft, const char *buf)
 {
-	return __cfqg_set_weight_device(cgrp, cft, buf, false);
+	return __cfqg_set_weight_device(css, cft, buf, false);
 }
 
-static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				       const char *buf)
+static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
+				       struct cftype *cft, const char *buf)
 {
-	return __cfqg_set_weight_device(cgrp, cft, buf, true);
+	return __cfqg_set_weight_device(css, cft, buf, true);
 }
 
-static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
-			    bool is_leaf_weight)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+			    u64 val, bool is_leaf_weight)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
 
 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
@@ -1727,30 +1725,32 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
 	return 0;
 }
 
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+			  u64 val)
 {
-	return __cfq_set_weight(cgrp, cft, val, false);
+	return __cfq_set_weight(css, cft, val, false);
 }
 
-static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
+			       struct cftype *cft, u64 val)
 {
-	return __cfq_set_weight(cgrp, cft, val, true);
+	return __cfq_set_weight(css, cft, val, true);
 }
 
-static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
 			   struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
 			  cft->private, false);
 	return 0;
 }
 
-static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			     struct seq_file *sf)
+static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
+			     struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
 			  cft->private, true);
@@ -1773,20 +1773,20 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
 	return __blkg_prfill_rwstat(sf, pd, &sum);
 }
 
-static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
+static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
+				     struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
 			  &blkcg_policy_cfq, cft->private, false);
 	return 0;
 }
 
-static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
-				       struct seq_file *sf)
+static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
+				       struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
 			  &blkcg_policy_cfq, cft->private, true);
@@ -1810,10 +1810,10 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 }
 
 /* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
+static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
+				     struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
 			  &blkcg_policy_cfq, 0, false);
...
@@ -1956,7 +1956,7 @@ int bio_associate_current(struct bio *bio)
 
 	/* associate blkcg if exists */
 	rcu_read_lock();
-	css = task_subsys_state(current, blkio_subsys_id);
+	css = task_css(current, blkio_subsys_id);
 	if (css && css_tryget(css))
 		bio->bi_css = css;
 	rcu_read_unlock();
...
@@ -85,7 +85,7 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 
-extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
 
 static inline
 bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
...
@@ -34,10 +34,12 @@ extern void vmpressure_cleanup(struct vmpressure *vmpr);
 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
 extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
 extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
-extern int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
+extern int vmpressure_register_event(struct cgroup_subsys_state *css,
+				     struct cftype *cft,
 				     struct eventfd_ctx *eventfd,
 				     const char *args);
-extern void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+extern void vmpressure_unregister_event(struct cgroup_subsys_state *css,
+					struct cftype *cft,
 					struct eventfd_ctx *eventfd);
 #else
 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
...
@@ -35,7 +35,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
 		return 0;
 
 	rcu_read_lock();
-	classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+	classid = container_of(task_css(p, net_cls_subsys_id),
 			       struct cgroup_cls_state, css)->classid;
 	rcu_read_unlock();
@@ -51,7 +51,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
 		return 0;
 
 	rcu_read_lock();
-	css = task_subsys_state(p, net_cls_subsys_id);
+	css = task_css(p, net_cls_subsys_id);
 	if (css)
 		classid = container_of(css,
 				       struct cgroup_cls_state, css)->classid;
...
@@ -25,10 +25,6 @@ struct netprio_map {
 	u32 priomap[];
 };
 
-struct cgroup_netprio_state {
-	struct cgroup_subsys_state css;
-};
-
 extern void sock_update_netprioidx(struct sock *sk);
 
 #if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
@@ -39,7 +35,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
 	u32 idx;
 
 	rcu_read_lock();
-	css = task_subsys_state(p, net_prio_subsys_id);
+	css = task_css(p, net_prio_subsys_id);
 	idx = css->cgroup->id;
 	rcu_read_unlock();
 	return idx;
@@ -53,7 +49,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
 	u32 idx = 0;
 
 	rcu_read_lock();
-	css = task_subsys_state(p, net_prio_subsys_id);
+	css = task_css(p, net_prio_subsys_id);
 	if (css)
 		idx = css->cgroup->id;
 	rcu_read_unlock();
...
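
Both task_netprioidx() variants above share one shape: look up the task's css under rcu_read_lock() and fall back to index 0 when the lookup can fail (the modular case, where the controller may not be loaded). A toy userspace model of that NULL fallback follows; every name in it is invented, and the RCU bracketing is only noted in a comment.

/*
 * Userspace model of the task_netprioidx() fallback above.  lookup_css()
 * stands in for task_css(); in the kernel the call is bracketed by
 * rcu_read_lock()/rcu_read_unlock(), which this sketch omits.
 */
#include <stdio.h>

struct css_model {
	unsigned int cgroup_id;
};

static struct css_model *lookup_css(int subsys_present)
{
	static struct css_model css = { .cgroup_id = 42 };

	return subsys_present ? &css : NULL;	/* NULL when not loaded */
}

static unsigned int netprioidx_model(int subsys_present)
{
	struct css_model *css = lookup_css(subsys_present);
	unsigned int idx = 0;			/* default priority index */

	if (css)
		idx = css->cgroup_id;
	return idx;
}

int main(void)
{
	printf("%u %u\n", netprioidx_model(1), netprioidx_model(0));
	return 0;				/* prints: 42 0 */
}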
@@ -340,7 +340,7 @@ struct perf_cgroup {
 static inline struct perf_cgroup *
 perf_cgroup_from_task(struct task_struct *task)
 {
-	return container_of(task_subsys_state(task, perf_subsys_id),
+	return container_of(task_css(task, perf_subsys_id),
 			    struct perf_cgroup, css);
 }
@@ -591,7 +591,9 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 	if (!f.file)
 		return -EBADF;
 
-	css = cgroup_css_from_dir(f.file, perf_subsys_id);
+	rcu_read_lock();
+
+	css = css_from_dir(f.file->f_dentry, &perf_subsys);
 	if (IS_ERR(css)) {
 		ret = PTR_ERR(css);
 		goto out;
@@ -617,6 +619,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 		ret = -EINVAL;
 	}
 out:
+	rcu_read_unlock();
 	fdput(f);
 	return ret;
 }
@@ -7798,7 +7801,8 @@ static int __init perf_event_sysfs_init(void)
 device_initcall(perf_event_sysfs_init);
 
 #ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
+static struct cgroup_subsys_state *
+perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	struct perf_cgroup *jc;
@@ -7815,11 +7819,10 @@ static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
 	return &jc->css;
 }
 
-static void perf_cgroup_css_free(struct cgroup *cont)
+static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-	struct perf_cgroup *jc;
-	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
-			  struct perf_cgroup, css);
+	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
+
 	free_percpu(jc->info);
 	kfree(jc);
 }
@@ -7831,15 +7834,17 @@ static int __perf_cgroup_move(void *info)
 	return 0;
 }
 
-static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_subsys_state *css,
+			       struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset)
+	cgroup_taskset_for_each(task, css, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }
 
-static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+static void perf_cgroup_exit(struct cgroup_subsys_state *css,
+			     struct cgroup_subsys_state *old_css,
 			     struct task_struct *task)
 {
 	/*
...
@@ -6815,7 +6815,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
 				lockdep_is_held(&tsk->sighand->siglock)),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
@@ -7137,23 +7137,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 #ifdef CONFIG_CGROUP_SCHED
 
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
-			    struct task_group, css);
+	return css ? container_of(css, struct task_group, css) : NULL;
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-	struct task_group *tg, *parent;
+	struct task_group *parent = css_tg(parent_css);
+	struct task_group *tg;
 
-	if (!cgrp->parent) {
+	if (!parent) {
 		/* This is early initialization for the top cgroup */
 		return &root_task_group.css;
 	}
 
-	parent = cgroup_tg(cgrp->parent);
 	tg = sched_create_group(parent);
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
@@ -7161,41 +7160,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
-	struct task_group *parent;
-
-	if (!cgrp->parent)
-		return 0;
+	struct task_group *tg = css_tg(css);
+	struct task_group *parent = css_tg(css_parent(css));
 
-	parent = cgroup_tg(cgrp->parent);
-	sched_online_group(tg, parent);
+	if (parent)
+		sched_online_group(tg, parent);
 	return 0;
 }
 
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	sched_destroy_group(tg);
 }
 
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	sched_offline_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 				 struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
 #else
 		/* We don't support RT-tasks being in separate groups */
@@ -7206,17 +7202,17 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 	return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 			      struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset)
+	cgroup_taskset_for_each(task, css, tset)
 		sched_move_task(task);
 }
 
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+			    struct cgroup_subsys_state *old_css,
 		struct task_struct *task)
 {
 	/*
@@ -7231,15 +7227,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-				u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cftype, u64 shareval)
 {
-	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+	return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
 
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+			       struct cftype *cft)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	return (u64) scale_load_down(tg->shares);
 }
@@ -7361,26 +7358,28 @@ long tg_get_cfs_period(struct task_group *tg)
 	return cfs_period_us;
 }
 
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+				  struct cftype *cft)
 {
-	return tg_get_cfs_quota(cgroup_tg(cgrp));
+	return tg_get_cfs_quota(css_tg(css));
 }
 
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
-				   s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, s64 cfs_quota_us)
 {
-	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
 }
 
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
 {
-	return tg_get_cfs_period(cgroup_tg(cgrp));
+	return tg_get_cfs_period(css_tg(css));
 }
 
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-				    u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+				    struct cftype *cftype, u64 cfs_period_us)
 {
-	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+	return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
 struct cfs_schedulable_data {
@@ -7461,10 +7460,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 	return ret;
 }
 
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
-		struct cgroup_map_cb *cb)
+static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
+			  struct cgroup_map_cb *cb)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
 	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
@@ -7477,26 +7476,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-				s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+				struct cftype *cft, s64 val)
 {
-	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+	return sched_group_set_rt_runtime(css_tg(css), val);
 }
 
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+			       struct cftype *cft)
 {
-	return sched_group_rt_runtime(cgroup_tg(cgrp));
+	return sched_group_rt_runtime(css_tg(css));
 }
 
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
-		u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+				    struct cftype *cftype, u64 rt_period_us)
 {
-	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+	return sched_group_set_rt_period(css_tg(css), rt_period_us);
 }
 
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
 {
-	return sched_group_rt_period(cgroup_tg(cgrp));
+	return sched_group_rt_period(css_tg(css));
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
...
@@ -33,30 +33,20 @@ struct cpuacct {
 	struct kernel_cpustat __percpu *cpustat;
 };
 
-/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
-			    struct cpuacct, css);
+	return css ? container_of(css, struct cpuacct, css) : NULL;
 }
 
 /* return cpu accounting group to which this task belongs */
 static inline struct cpuacct *task_ca(struct task_struct *tsk)
 {
-	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
-static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
-{
-	return cgroup_ca(ca->css.cgroup->parent);
+	return css_ca(task_css(tsk, cpuacct_subsys_id));
 }
 
 static inline struct cpuacct *parent_ca(struct cpuacct *ca)
 {
-	if (!ca->css.cgroup->parent)
-		return NULL;
-	return cgroup_ca(ca->css.cgroup->parent);
+	return css_ca(css_parent(&ca->css));
 }
 
 static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
@@ -66,11 +56,12 @@ static struct cpuacct root_cpuacct = {
 };
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	struct cpuacct *ca;
 
-	if (!cgrp->parent)
+	if (!parent_css)
 		return &root_cpuacct.css;
 
 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -96,9 +87,9 @@ static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_css_free(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup_subsys_state *css)
 {
-	struct cpuacct *ca = cgroup_ca(cgrp);
+	struct cpuacct *ca = css_ca(css);
 
 	free_percpu(ca->cpustat);
 	free_percpu(ca->cpuusage);
@@ -141,9 +132,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 }
 
 /* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-	struct cpuacct *ca = cgroup_ca(cgrp);
+	struct cpuacct *ca = css_ca(css);
 	u64 totalcpuusage = 0;
 	int i;
@@ -153,10 +144,10 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
 	return totalcpuusage;
 }
 
-static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
+static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
 			  u64 reset)
 {
-	struct cpuacct *ca = cgroup_ca(cgrp);
+	struct cpuacct *ca = css_ca(css);
 	int err = 0;
 	int i;
@@ -172,10 +163,10 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
 	return err;
 }
 
-static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
-				   struct seq_file *m)
+static int cpuacct_percpu_seq_read(struct cgroup_subsys_state *css,
+				   struct cftype *cft, struct seq_file *m)
 {
-	struct cpuacct *ca = cgroup_ca(cgroup);
+	struct cpuacct *ca = css_ca(css);
 	u64 percpu;
 	int i;
@@ -192,10 +183,10 @@ static const char * const cpuacct_stat_desc[] = {
 	[CPUACCT_STAT_SYSTEM] = "system",
 };
 
-static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
-			      struct cgroup_map_cb *cb)
+static int cpuacct_stats_show(struct cgroup_subsys_state *css,
+			      struct cftype *cft, struct cgroup_map_cb *cb)
 {
-	struct cpuacct *ca = cgroup_ca(cgrp);
+	struct cpuacct *ca = css_ca(css);
 	int cpu;
 	s64 val = 0;
@@ -281,7 +272,7 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
 	while (ca != &root_cpuacct) {
 		kcpustat = this_cpu_ptr(ca->cpustat);
 		kcpustat->cpustat[index] += val;
-		ca = __parent_ca(ca);
+		ca = parent_ca(ca);
 	}
 	rcu_read_unlock();
 }
...
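
The parent_ca() cleanup above feeds the hierarchical accounting loop in cpuacct_account_field(), which charges every group from the task's own up to, but not including, the root (the kernel accounts root_cpuacct separately). A minimal userspace sketch of that walk follows, with all names invented.

/*
 * Userspace model of the cpuacct_account_field() walk: charge a value
 * at every level from the task's group up toward the root.  The parent
 * pointer mimics css_parent(); like the kernel loop
 * "while (ca != &root_cpuacct)", the root itself is skipped here.
 */
#include <stdio.h>

struct group {
	const char *name;
	unsigned long long usage;
	struct group *parent;			/* NULL at the root */
};

static void account(struct group *g, unsigned long long val)
{
	for (; g->parent; g = g->parent)	/* stop before the root */
		g->usage += val;
}

int main(void)
{
	struct group root = { "root", 0, NULL };
	struct group mid  = { "mid",  0, &root };
	struct group leaf = { "leaf", 0, &mid };

	account(&leaf, 1000);			/* e.g. 1000ns of CPU time */
	printf("leaf=%llu mid=%llu root=%llu\n",
	       leaf.usage, mid.usage, root.usage);
	return 0;				/* leaf=1000 mid=1000 root=0 */
}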
@@ -665,9 +665,9 @@ extern int group_balance_cpu(struct sched_group *sg);
 /*
  * Return the group to which this tasks belongs.
  *
- * We cannot use task_subsys_state() and friends because the cgroup
- * subsystem changes that value before the cgroup_subsys::attach() method
- * is called, therefore we cannot pin it and might observe the wrong value.
+ * We cannot use task_css() and friends because the cgroup subsystem
+ * changes that value before the cgroup_subsys::attach() method is called,
+ * therefore we cannot pin it and might observe the wrong value.
  *
  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
  * core changes this before calling sched_move_task().
...
@@ -36,21 +36,13 @@ static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
 static inline
 struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
 {
-	return container_of(s, struct hugetlb_cgroup, css);
-}
-
-static inline
-struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
-{
-	return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
-							   hugetlb_subsys_id));
+	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
 }
 
 static inline
 struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
 {
-	return hugetlb_cgroup_from_css(task_subsys_state(task,
-							 hugetlb_subsys_id));
+	return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
 }
 
 static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
@@ -58,17 +50,15 @@ static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
 	return (h_cg == root_h_cgroup);
 }
 
-static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
+static inline struct hugetlb_cgroup *
+parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
 {
-	if (!cg->parent)
-		return NULL;
-	return hugetlb_cgroup_from_cgroup(cg->parent);
+	return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
 }
 
-static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
+static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
 {
 	int idx;
-	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);
 
 	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
 		if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
@@ -77,19 +67,18 @@ static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
 	return false;
 }
 
-static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
+	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
+	struct hugetlb_cgroup *h_cgroup;
 	int idx;
-	struct cgroup *parent_cgroup;
-	struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;
 
 	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
 	if (!h_cgroup)
 		return ERR_PTR(-ENOMEM);
 
-	parent_cgroup = cgroup->parent;
-	if (parent_cgroup) {
-		parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
+	if (parent_h_cgroup) {
 		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
 			res_counter_init(&h_cgroup->hugepage[idx],
 					 &parent_h_cgroup->hugepage[idx]);
@@ -101,11 +90,11 @@ static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgrou
 	return &h_cgroup->css;
 }
 
-static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
 {
 	struct hugetlb_cgroup *h_cgroup;
 
-	h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
+	h_cgroup = hugetlb_cgroup_from_css(css);
 	kfree(h_cgroup);
 }
@@ -117,15 +106,14 @@ static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
  * page reference and test for page active here. This function
 * cannot fail.
 */
-static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
 				       struct page *page)
 {
 	int csize;
 	struct res_counter *counter;
 	struct res_counter *fail_res;
 	struct hugetlb_cgroup *page_hcg;
-	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
-	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
 
 	page_hcg = hugetlb_cgroup_from_page(page);
 	/*
@@ -155,8 +143,9 @@ static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
-static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
+	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 	struct hstate *h;
 	struct page *page;
 	int idx = 0;
@@ -165,13 +154,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
 		for_each_hstate(h) {
 			spin_lock(&hugetlb_lock);
 			list_for_each_entry(page, &h->hugepage_activelist, lru)
-				hugetlb_cgroup_move_parent(idx, cgroup, page);
+				hugetlb_cgroup_move_parent(idx, h_cg, page);
 
 			spin_unlock(&hugetlb_lock);
 			idx++;
 		}
 		cond_resched();
-	} while (hugetlb_cgroup_have_usage(cgroup));
+	} while (hugetlb_cgroup_have_usage(h_cg));
 }
 
 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
@@ -253,14 +242,15 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
 	return;
 }
 
-static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
-				   struct file *file, char __user *buf,
-				   size_t nbytes, loff_t *ppos)
+static ssize_t hugetlb_cgroup_read(struct cgroup_subsys_state *css,
+				   struct cftype *cft, struct file *file,
+				   char __user *buf, size_t nbytes,
+				   loff_t *ppos)
 {
 	u64 val;
 	char str[64];
 	int idx, name, len;
-	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 
 	idx = MEMFILE_IDX(cft->private);
 	name = MEMFILE_ATTR(cft->private);
@@ -270,12 +260,12 @@ static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
 	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
 }
 
-static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
-				const char *buffer)
+static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
+				struct cftype *cft, const char *buffer)
 {
 	int idx, name, ret;
 	unsigned long long val;
-	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 
 	idx = MEMFILE_IDX(cft->private);
 	name = MEMFILE_ATTR(cft->private);
@@ -300,10 +290,11 @@ static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
 	return ret;
 }
 
-static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
+static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
+				unsigned int event)
 {
 	int idx, name, ret = 0;
-	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 
 	idx = MEMFILE_IDX(event);
 	name = MEMFILE_ATTR(event);
...
@@ -132,10 +132,10 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 	return 0;
 }
 
-static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
 			    const char *buffer)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	unsigned long long val;
 	int ret = 0;
@@ -180,9 +180,9 @@ static u64 tcp_read_usage(struct mem_cgroup *memcg)
 	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
 }
 
-static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	u64 val;
 
 	switch (cft->private) {
@@ -202,13 +202,13 @@ static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
 	return val;
 }
 
-static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
 	struct mem_cgroup *memcg;
 	struct tcp_memcontrol *tcp;
 	struct cg_proto *cg_proto;
 
-	memcg = mem_cgroup_from_cont(cont);
+	memcg = mem_cgroup_from_css(css);
 	cg_proto = tcp_prot.proto_cgroup(memcg);
 	if (!cg_proto)
 		return 0;
...