Commit 7d76f856 authored by Dongli Zhang, committed by Jens Axboe

blk-mq: use HCTX_TYPE_DEFAULT but not 0 to index blk_mq_tag_set->map

Replace set->map[0] with set->map[HCTX_TYPE_DEFAULT] to avoid hardcoding.
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5b88a17c
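
Context for the change: HCTX_TYPE_DEFAULT is the first enumerator of enum hctx_type and therefore has the value 0, so every replacement below is behavior-neutral; the point is that the index now names the map being used. An abridged sketch of the relevant definitions, as found in include/linux/blk-mq.h around this kernel release (fields not relevant here are elided):

enum hctx_type {
	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
	HCTX_TYPE_READ,		/* just for READ I/O */
	HCTX_TYPE_POLL,		/* polled I/O of any kind */

	HCTX_MAX_TYPES,
};

struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;	/* nr entries in map[] */
	/* ... */
};

With per-type maps, a bare 0 index would read as "the first map" rather than "the default map", which is exactly the ambiguity the commit removes.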
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2061,7 +2061,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	struct blk_mq_tags *tags;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2117,7 +2117,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	size_t rq_size, left;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2416,7 +2416,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = set->map[0].mq_map[i];
+		hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2426,7 +2426,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			set->map[0].mq_map[i] = 0;
+			set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2733,7 +2733,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(&set->map[0], i);
+		node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2964,7 +2964,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		return set->ops->map_queues(set);
 	} else {
 		BUG_ON(set->nr_maps > 1);
-		return blk_mq_map_queues(&set->map[0]);
+		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 	}
 }
 
@@ -3234,7 +3234,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
 					nr_hw_queues, prev_nr_hw_queues);
 			set->nr_hw_queues = prev_nr_hw_queues;
-			blk_mq_map_queues(&set->map[0]);
+			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 			goto fallback;
 		}
 		blk_mq_map_swqueue(q);
...
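
For driver authors, the same convention applies on the other side of the map_queues contract. A minimal, hypothetical single-map driver callback (foo_map_queues is an illustrative name, not part of this commit) would now spell the index the same way:

#include <linux/blk-mq.h>

/*
 * Hypothetical single-map driver: let the block core spread the
 * default map across CPUs, indexing by type rather than by 0.
 */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}

This mirrors the blk_mq_update_queue_map() fallback above, which calls blk_mq_map_queues() on the default map whenever a driver does not supply its own callback.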