Commit 8ccdf4a3 authored by Jianchao Wang's avatar Jianchao Wang Committed by Jens Axboe

blk-mq: save queue mapping result into ctx directly

Currently, the queue mapping result is saved in a two-dimensional
array. In the hot path, to get a hctx, we need to do the following:

  q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]

This isn't very efficient. Instead, we can save the queue mapping
result directly in ctx, with one entry per hctx type, like,

  ctx->hctxs[type]
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 058fdecc
...@@ -321,7 +321,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) ...@@ -321,7 +321,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{ {
struct elevator_queue *e = q->elevator; struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu); struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
bool ret = false; bool ret = false;
enum hctx_type type; enum hctx_type type;
......
...@@ -170,7 +170,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) ...@@ -170,7 +170,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
data->ctx = blk_mq_get_ctx(data->q); data->ctx = blk_mq_get_ctx(data->q);
data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
data->ctx->cpu); data->ctx);
tags = blk_mq_tags_from_data(data); tags = blk_mq_tags_from_data(data);
if (data->flags & BLK_MQ_REQ_RESERVED) if (data->flags & BLK_MQ_REQ_RESERVED)
bt = &tags->breserved_tags; bt = &tags->breserved_tags;
......
...@@ -364,7 +364,7 @@ static struct request *blk_mq_get_request(struct request_queue *q, ...@@ -364,7 +364,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
} }
if (likely(!data->hctx)) if (likely(!data->hctx))
data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->hctx = blk_mq_map_queue(q, data->cmd_flags,
data->ctx->cpu); data->ctx);
if (data->cmd_flags & REQ_NOWAIT) if (data->cmd_flags & REQ_NOWAIT)
data->flags |= BLK_MQ_REQ_NOWAIT; data->flags |= BLK_MQ_REQ_NOWAIT;
...@@ -2435,7 +2435,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) ...@@ -2435,7 +2435,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
continue; continue;
hctx = blk_mq_map_queue_type(q, j, i); hctx = blk_mq_map_queue_type(q, j, i);
ctx->hctxs[j] = hctx;
/* /*
* If the CPU is already set in the mask, then we've * If the CPU is already set in the mask, then we've
* mapped this one already. This can happen if * mapped this one already. This can happen if
......
...@@ -23,6 +23,7 @@ struct blk_mq_ctx { ...@@ -23,6 +23,7 @@ struct blk_mq_ctx {
unsigned int cpu; unsigned int cpu;
unsigned short index_hw[HCTX_MAX_TYPES]; unsigned short index_hw[HCTX_MAX_TYPES];
struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];
/* incremented at dispatch time */ /* incremented at dispatch time */
unsigned long rq_dispatched[2]; unsigned long rq_dispatched[2];
...@@ -97,11 +98,11 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue * ...@@ -97,11 +98,11 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
* @q: request queue * @q: request queue
* @flags: request command flags * @flags: request command flags
 * @cpu: CPU * @ctx: software queue ctx
*/ */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
unsigned int flags, unsigned int flags,
unsigned int cpu) struct blk_mq_ctx *ctx)
{ {
enum hctx_type type = HCTX_TYPE_DEFAULT; enum hctx_type type = HCTX_TYPE_DEFAULT;
...@@ -116,7 +117,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, ...@@ -116,7 +117,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
q->tag_set->map[HCTX_TYPE_READ].nr_queues) q->tag_set->map[HCTX_TYPE_READ].nr_queues)
type = HCTX_TYPE_READ; type = HCTX_TYPE_READ;
return blk_mq_map_queue_type(q, type, cpu); return ctx->hctxs[type];
} }
/* /*
......
...@@ -38,7 +38,7 @@ extern struct ida blk_queue_ida; ...@@ -38,7 +38,7 @@ extern struct ida blk_queue_ida;
static inline struct blk_flush_queue * static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{ {
return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq; return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
} }
static inline void __blk_get_queue(struct request_queue *q) static inline void __blk_get_queue(struct request_queue *q)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment