Commit 477e19de authored by Jianchao Wang, committed by Jens Axboe

blk-mq: adjust debugfs and sysfs register when updating nr_hw_queues

blk-mq debugfs and sysfs entries need to be removed before updating
the queue map, otherwise we get wrong results there. This patch fixes
that and removes the redundant debugfs and sysfs register/unregister
operations during __blk_mq_update_nr_hw_queues.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2d29c9f8
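For readers skimming the diff, the net effect on __blk_mq_update_nr_hw_queues() can be condensed as the sketch below. This is a hypothetical simplification pieced together from the hunks that follow; queue freezing, the elevator switch, and the switch_back error path of the real function are omitted.

	/* 1. Tear down the stale entries before the hctx<->cpu map changes. */
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	/* 2. Update the queue map while nothing is registered against it. */
	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_map_swqueue(q);
	}

	/* 3. Re-register everything against the new mapping. */
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

With registration handled once per queue at this level, the per-hctx register/unregister calls in blk_mq_init_hctx()/blk_mq_exit_hctx() and the sysfs calls in blk_mq_realloc_hw_ctxs() become redundant, which is why the hunks below drop them; the debugfs unregister moves up into the blk_mq_exit_hw_queues() loop so it still runs on the normal teardown path.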
@@ -2154,8 +2154,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-	blk_mq_debugfs_unregister_hctx(hctx);
-
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
@@ -2182,6 +2180,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (i == nr_queue)
 			break;
+		blk_mq_debugfs_unregister_hctx(hctx);
 		blk_mq_exit_hctx(q, set, hctx, i);
 	}
 }
@@ -2239,8 +2238,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		init_srcu_struct(hctx->srcu);
 
-	blk_mq_debugfs_register_hctx(q, hctx);
-
 	return 0;
 
  free_fq:
@@ -2529,8 +2526,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	int i, j;
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
-	blk_mq_sysfs_unregister(q);
-
 	/* protect against switching io scheduler */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2578,7 +2573,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	}
 	q->nr_hw_queues = i;
 	mutex_unlock(&q->sysfs_lock);
-	blk_mq_sysfs_register(q);
 }
 
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -2676,25 +2670,6 @@ void blk_mq_free_queue(struct request_queue *q)
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 }
 
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
-{
-	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
-	blk_mq_debugfs_unregister_hctxs(q);
-	blk_mq_sysfs_unregister(q);
-
-	/*
-	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
-	 * we should change hctx numa_node according to the new topology (this
-	 * involves freeing and re-allocating memory, worth doing?)
-	 */
-	blk_mq_map_swqueue(q);
-
-	blk_mq_sysfs_register(q);
-	blk_mq_debugfs_register_hctxs(q);
-}
-
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;
@@ -3004,11 +2979,21 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		if (!blk_mq_elv_switch_none(&head, q))
 			goto switch_back;
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		blk_mq_debugfs_unregister_hctxs(q);
+		blk_mq_sysfs_unregister(q);
+	}
+
 	set->nr_hw_queues = nr_hw_queues;
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-		blk_mq_queue_reinit(q);
+		blk_mq_map_swqueue(q);
+	}
+
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		blk_mq_sysfs_register(q);
+		blk_mq_debugfs_register_hctxs(q);
 	}
 
 switch_back:
...