Commit a9520cd6 authored by Tejun Heo, committed by Jens Axboe

blkcg: make blkcg_policy methods take a pointer to blkcg_policy_data

The newly added ->pd_alloc_fn() and ->pd_free_fn() deal with pd
(blkg_policy_data) while the older methods take blkg (blkcg_gq).  Using
blkg doesn't make sense for ->pd_alloc_fn(), pd can always be mapped
back to its blkg after allocation, and these are policy-specific
methods, so it makes sense to converge on pd.

This patch makes all methods deal with pd instead of blkg.  Most
conversions are trivial.  In blk-cgroup.c, a couple of method invocation
sites now test whether pd exists instead of whether the policy is
enabled, for consistency.  This shouldn't cause any behavioral
differences.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent b2ce2643
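
To make the convention concrete, here is a minimal, hypothetical sketch of a
policy written against the pd-based callbacks this patch converges on.  Only
the blkcg types and the pd->blkg back-pointer come from the patch itself; the
foo_* names are invented for illustration and are not code from the commit.

/*
 * Hypothetical example policy "foo": its per-blkg data embeds
 * struct blkg_policy_data, the same way throtl_grp and cfq_group do.
 */
struct foo_grp {
	struct blkg_policy_data pd;	/* embedded pd; container_of() recovers foo_grp */
	unsigned long nr_inits;
};

static inline struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct foo_grp, pd) : NULL;
}

/* After this patch the per-policy hooks receive pd directly. */
static void foo_pd_init(struct blkg_policy_data *pd)
{
	struct foo_grp *foo = pd_to_foo(pd);
	struct blkcg_gq *blkg = pd->blkg;	/* pd can always be mapped back to its blkg */

	foo->nr_inits++;
	WARN_ON_ONCE(!blkg->q);	/* the request_queue stays reachable via pd->blkg->q */
}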
@@ -242,7 +242,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		struct blkcg_policy *pol = blkcg_policy[i];
 
 		if (blkg->pd[i] && pol->pd_init_fn)
-			pol->pd_init_fn(blkg);
+			pol->pd_init_fn(blkg->pd[i]);
 	}
 
 	/* insert */
@@ -256,7 +256,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 			struct blkcg_policy *pol = blkcg_policy[i];
 
 			if (blkg->pd[i] && pol->pd_online_fn)
-				pol->pd_online_fn(blkg);
+				pol->pd_online_fn(blkg->pd[i]);
 		}
 	}
 	blkg->online = true;
@@ -347,7 +347,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 		struct blkcg_policy *pol = blkcg_policy[i];
 
 		if (blkg->pd[i] && pol->pd_offline_fn)
-			pol->pd_offline_fn(blkg);
+			pol->pd_offline_fn(blkg->pd[i]);
 	}
 
 	blkg->online = false;
@@ -468,9 +468,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
-			if (blkcg_policy_enabled(blkg->q, pol) &&
-			    pol->pd_reset_stats_fn)
-				pol->pd_reset_stats_fn(blkg);
+			if (blkg->pd[i] && pol->pd_reset_stats_fn)
+				pol->pd_reset_stats_fn(blkg->pd[i]);
 		}
 	}
 
@@ -1076,7 +1075,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		pd->blkg = blkg;
 		pd->plid = pol->plid;
 		if (pol->pd_init_fn)
-			pol->pd_init_fn(blkg);
+			pol->pd_init_fn(pd);
 	}
 
 	__set_bit(pol->plid, q->blkcg_pols);
@@ -1116,10 +1115,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		/* grab blkcg lock too while removing @pd from @blkg */
 		spin_lock(&blkg->blkcg->lock);
 
-		if (pol->pd_offline_fn)
-			pol->pd_offline_fn(blkg);
-
 		if (blkg->pd[pol->plid]) {
+			if (pol->pd_offline_fn)
+				pol->pd_offline_fn(blkg->pd[pol->plid]);
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
......
@@ -377,9 +377,10 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 	return &tg->pd;
 }
 
-static void throtl_pd_init(struct blkcg_gq *blkg)
+static void throtl_pd_init(struct blkg_policy_data *pd)
 {
-	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct throtl_grp *tg = pd_to_tg(pd);
+	struct blkcg_gq *blkg = tg_to_blkg(tg);
 	struct throtl_data *td = blkg->q->td;
 	struct throtl_service_queue *sq = &tg->service_queue;
 
@@ -417,13 +418,13 @@ static void tg_update_has_rules(struct throtl_grp *tg)
 				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
 }
 
-static void throtl_pd_online(struct blkcg_gq *blkg)
+static void throtl_pd_online(struct blkg_policy_data *pd)
 {
 	/*
 	 * We don't want new groups to escape the limits of its ancestors.
 	 * Update has_rules[] after a new group is brought online.
 	 */
-	tg_update_has_rules(blkg_to_tg(blkg));
+	tg_update_has_rules(pd_to_tg(pd));
 }
 
 static void throtl_pd_free(struct blkg_policy_data *pd)
@@ -435,9 +436,9 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
 	kfree(tg);
 }
 
-static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
+static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
 {
-	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct throtl_grp *tg = pd_to_tg(pd);
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
......
@@ -1597,18 +1597,18 @@ static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
 	return &cfqg->pd;
 }
 
-static void cfq_pd_init(struct blkcg_gq *blkg)
+static void cfq_pd_init(struct blkg_policy_data *pd)
 {
-	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkg->blkcg);
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
+	struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
 
 	cfqg->weight = cgd->weight;
 	cfqg->leaf_weight = cgd->leaf_weight;
 }
 
-static void cfq_pd_offline(struct blkcg_gq *blkg)
+static void cfq_pd_offline(struct blkg_policy_data *pd)
 {
-	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
 	int i;
 
 	for (i = 0; i < IOPRIO_BE_NR; i++) {
@@ -1661,9 +1661,9 @@ static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *
 	return a;
 }
 
-static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
 {
-	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
 
 	cfqg_stats_reset(&cfqg->stats);
 	cfqg_stats_reset(&cfqg->dead_stats);
......
@@ -125,11 +125,11 @@ struct blkcg_gq {
 
 typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
 typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
-typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
 
 struct blkcg_policy {
 	int plid;
......
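
Given the pd-based typedefs above, a policy wires its hooks into struct
blkcg_policy as before.  The following sketch continues the hypothetical
"foo" example from earlier; the .pd_*_fn field names follow the pol->pd_*_fn
uses in the hunks above, while the foo_* helpers are invented and are not
part of the commit.

static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
{
	struct foo_grp *foo = kzalloc_node(sizeof(*foo), gfp, node);

	return foo ? &foo->pd : NULL;
}

static void foo_pd_free(struct blkg_policy_data *pd)
{
	kfree(pd_to_foo(pd));
}

static struct blkcg_policy blkcg_policy_foo = {
	/* .plid is filled in when the policy is registered */
	.pd_alloc_fn	= foo_pd_alloc,
	.pd_init_fn	= foo_pd_init,
	.pd_free_fn	= foo_pd_free,
};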