Commit 0a5a7d0e authored by Tejun Heo, committed by Jens Axboe

blkcg: update blkg get functions to take blkio_cgroup as parameter

In both blkg get functions - throtl_get_tg() and cfq_get_cfqg() -
instead of obtaining the blkcg of %current explicitly, let the caller
specify the blkcg to use as a parameter and make both functions hold
a reference on that blkcg while they work with it.

This is part of the block cgroup interface cleanup and will help
make the blkcg API more modular.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2a7f1244
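
For reference, the calling convention this patch converts the call sites to
looks roughly like the sketch below (condensed from the cfq_find_alloc_queue()
hunk further down; the surrounding lookup and error handling are omitted):

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);	/* caller resolves the blkcg of %current */
	cfqg = cfq_get_cfqg(cfqd, blkcg);	/* and passes it down explicitly */
	...
	rcu_read_unlock();

The get functions themselves no longer call task_blkio_cgroup(); they only
look up or create the group for the blkcg they were handed.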
@@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
+					struct blkio_cgroup *blkcg)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
-	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
 	if (tg)
 		return tg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -331,6 +333,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/* Group allocated and queue is still alive. take the lock */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
+	css_put(&blkcg->css);
 
 	/* Make sure @q is still alive */
 	if (unlikely(blk_queue_bypass(q))) {
@@ -338,11 +341,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 		return NULL;
 	}
 
-	/*
-	 * Initialize the new group. After sleeping, read the blkcg again.
-	 */
-	blkcg = task_blkio_cgroup(current);
-
 	/*
 	 * If some other thread already allocated the group while we were
 	 * not holding queue lock, free up the group
@@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * IO group
 	 */
 	spin_lock_irq(q->queue_lock);
-	tg = throtl_get_tg(td);
+	tg = throtl_get_tg(td, blkcg);
 	if (unlikely(!tg))
 		goto out_unlock;
 
...
@@ -1122,17 +1122,19 @@ cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
  * Search for the cfq group current task belongs to. request_queue lock must
  * be held.
  */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
+				      struct blkio_cgroup *blkcg)
 {
-	struct blkio_cgroup *blkcg;
 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
 
-	blkcg = task_blkio_cgroup(current);
 	cfqg = cfq_find_cfqg(cfqd, blkcg);
 	if (cfqg)
 		return cfqg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -1142,16 +1144,14 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 	 * around by the time we return. CFQ queue allocation code does
 	 * the same. It might be racy though.
 	 */
-
 	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
 	cfqg = cfq_alloc_cfqg(cfqd);
 
 	spin_lock_irq(q->queue_lock);
-
 	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
+	css_put(&blkcg->css);
 
 	/*
 	 * If some other thread already allocated the group while we were
@@ -1278,7 +1278,8 @@ static bool cfq_clear_queue(struct request_queue *q)
 }
 
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
+				      struct blkio_cgroup *blkcg)
 {
 	return &cfqd->root_group;
 }
@@ -2860,6 +2861,7 @@ static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
+	struct blkio_cgroup *blkcg;
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_cq *cic;
 	struct cfq_group *cfqg;
@@ -2867,7 +2869,9 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 retry:
 	rcu_read_lock();
 
-	cfqg = cfq_get_cfqg(cfqd);
+	blkcg = task_blkio_cgroup(current);
+
+	cfqg = cfq_get_cfqg(cfqd, blkcg);
 	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);
 
...
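
Since the get functions now receive the blkcg from the caller instead of
re-reading it from %current after sleeping, they have to keep it alive across
the blocking per-cpu allocation themselves. A rough sketch of that pattern,
condensed from the cfq hunks above (the throttle path is analogous):

	if (!css_tryget(&blkcg->css))		/* pin the blkcg before dropping locks */
		return NULL;

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	cfqg = cfq_alloc_cfqg(cfqd);		/* may sleep for per cpu stat allocation */

	spin_lock_irq(q->queue_lock);
	rcu_read_lock();
	css_put(&blkcg->css);			/* reference held only across the alloc */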