Commit a8908939 authored by Jens Axboe

blk-mq: kill q->mq_map

It's just a pointer to set->mq_map, use that instead. Move the
assignment a bit earlier, so we always know it's valid.
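
To illustrate the indirection (a minimal userspace sketch, not kernel code; the
struct layouts below are simplified stand-ins for the real definitions),
blk_mq_map_queue() now reaches the cpu -> hw queue map through q->tag_set
instead of a duplicated q->mq_map pointer:

	#include <stdio.h>

	/* Simplified stand-ins, not the real kernel structs. */
	struct blk_mq_tag_set {
		unsigned int *mq_map;		/* per-cpu index into queue_hw_ctx[] */
	};

	struct blk_mq_hw_ctx {
		int id;
	};

	struct request_queue {
		struct blk_mq_tag_set *tag_set;	/* assigned early, always valid */
		struct blk_mq_hw_ctx **queue_hw_ctx;
	};

	/* After this commit: reach the map through the tag set. */
	static struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						      int cpu)
	{
		struct blk_mq_tag_set *set = q->tag_set;

		return q->queue_hw_ctx[set->mq_map[cpu]];
	}

	int main(void)
	{
		unsigned int map[4] = { 0, 0, 1, 1 };	/* cpus 0-1 -> hctx 0, 2-3 -> hctx 1 */
		struct blk_mq_hw_ctx h0 = { .id = 0 }, h1 = { .id = 1 };
		struct blk_mq_hw_ctx *hctxs[] = { &h0, &h1 };
		struct blk_mq_tag_set set = { .mq_map = map };
		struct request_queue q = { .tag_set = &set, .queue_hw_ctx = hctxs };
		int cpu;

		for (cpu = 0; cpu < 4; cpu++)
			printf("cpu %d -> hctx %d\n", cpu,
			       blk_mq_map_queue(&q, cpu)->id);
		return 0;
	}

Because the lookup now depends on q->tag_set being valid, the assignment is
moved from blk_mq_add_queue_tag_set() into blk_mq_init_allocated_queue(),
before any mapping lookups can happen.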
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a0fedc85
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = q->mq_map[i];
+		hctx_idx = set->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			q->mq_map[i] = 0;
+			set->mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2430,8 +2430,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 				     struct request_queue *q)
 {
-	q->tag_set = set;
-
 	mutex_lock(&set->tag_list_lock);
 
 	/*
@@ -2468,8 +2466,6 @@ void blk_mq_release(struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	q->mq_map = NULL;
-
 	kfree(q->queue_hw_ctx);
 
 	/*
@@ -2589,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(q->mq_map, i);
+		node = blk_mq_hw_queue_to_node(set->mq_map, i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2666,8 +2662,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_hw_ctx)
 		goto err_percpu;
 
-	q->mq_map = set->mq_map;
-
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
@@ -2676,6 +2670,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
+	q->tag_set = set;
 
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -75,7 +75,9 @@ extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 		int cpu)
 {
-	return q->queue_hw_ctx[q->mq_map[cpu]];
+	struct blk_mq_tag_set *set = q->tag_set;
+
+	return q->queue_hw_ctx[set->mq_map[cpu]];
 }
 
 /*
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -409,8 +409,6 @@ struct request_queue {
 	const struct blk_mq_ops	*mq_ops;
 
-	unsigned int		*mq_map;
-
 	/* sw queues */
 	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;