Commit 29b12589 authored by Vivek Goyal, committed by Jens Axboe

blk-throttle: Dynamically allocate root group

Currently, the root throtl_grp is allocated statically. But as we will be
introducing per-cpu stat pointers, which will be allocated dynamically
even for the root group, we might as well make the whole root
throtl_grp allocation dynamic and treat it in the same manner as the
other groups.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent f469a7b4
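
The pattern applied here is a common one: turning an embedded struct member into a dynamically allocated one moves a possible allocation failure into the owner's init path. A minimal sketch of that conversion, with hypothetical names (owner/child are illustrations, not kernel structures) and plain calloc() standing in for kzalloc():

    #include <stdlib.h>

    struct child {
        long stat;              /* imagine per-cpu stat pointers landing here later */
    };

    struct owner {
        struct child *root;     /* was: struct child root; (embedded, no failure mode) */
    };

    static int owner_init(struct owner *o)
    {
        o->root = calloc(1, sizeof(*o->root));  /* stands in for kzalloc() */
        if (!o->root)
            return -1;          /* init can now fail, so callers must check */
        return 0;
    }

    static void owner_exit(struct owner *o)
    {
        free(o->root);          /* teardown must free what init allocated */
        o->root = NULL;
    }

The diff below makes exactly this trade in blk-throttle: the struct member becomes a pointer, the two readers dereference it, and blk_throtl_init() gains the allocation and its error path.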
@@ -88,7 +88,7 @@ struct throtl_data
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
-	struct throtl_grp root_tg;
+	struct throtl_grp *root_tg;
 	struct request_queue *queue;
 
 	/* Total Number of queued bios on READ and WRITE lists */
@@ -233,7 +233,7 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
	 * Avoid lookup in this case
	 */
 	if (blkcg == &blkio_root_cgroup)
-		tg = &td->root_tg;
+		tg = td->root_tg;
 	else
 		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
@@ -313,7 +313,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 
 	/* Group allocation failed. Account the IO to root group */
 	if (!tg) {
-		tg = &td->root_tg;
+		tg = td->root_tg;
 		return tg;
 	}
 
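
(Since blk_throtl_init() now fails outright when the root group cannot be allocated, td->root_tg is guaranteed to exist by the time throtl_get_tg() runs, so this fallback keeps its old meaning: IO is still accounted to the root group; only the access changes from the embedded member to the pointer.)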
@@ -1153,18 +1153,16 @@ int blk_throtl_init(struct request_queue *q)
 	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
-	/* Init root group */
-	tg = &td->root_tg;
-	throtl_init_group(tg);
+	/* alloc and Init root group. */
+	td->queue = q;
+	tg = throtl_alloc_tg(td);
 
-	/*
-	 * Set root group reference to 2. One reference will be dropped when
-	 * all groups on tg_list are being deleted during queue exit. Other
-	 * reference will remain there as we don't want to delete this group
-	 * as it is statically allocated and gets destroyed when throtl_data
-	 * goes away.
-	 */
-	atomic_inc(&tg->ref);
+	if (!tg) {
+		kfree(td);
+		return -ENOMEM;
+	}
+
+	td->root_tg = tg;
 
 	rcu_read_lock();
 	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
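
(The deleted comment explains what else changes here: the extra reference that pinned the statically allocated root group is gone, so the root group now carries the same single initial reference as any other dynamically allocated group and can be torn down with the rest of tg_list at queue exit.)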
@@ -1173,7 +1171,6 @@ int blk_throtl_init(struct request_queue *q)
 	throtl_add_group_to_td_list(td, tg);
 
 	/* Attach throtl data to request queue */
-	td->queue = q;
 	q->td = td;
 	return 0;
 }
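
(Note that td->queue = q is moved rather than deleted: it now precedes the throtl_alloc_tg() call in the previous hunk, presumably because the allocator introduced by parent commit f469a7b4 dereferences td->queue.)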