Commit 600c3b0c authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: open code __blk_mq_alloc_request in blk_mq_alloc_request_hctx

blk_mq_alloc_request_hctx is only used for NVMeoF connect commands, so
tailor it to the specific requirements, and don't bother the general
fast path code with its special twinkles.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 76647368
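
As background for the commit message above, here is a minimal, hypothetical sketch of the only kind of caller this path serves: a fabrics-style connect command allocated against one specific hardware queue. The connect_cmd_alloc() helper and its parameters are illustrative and not taken from the NVMe driver; blk_mq_alloc_request_hctx(), REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT and BLK_MQ_REQ_RESERVED are the existing kernel names this patch works with.

#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Hypothetical helper: allocate a connect-style command on one specific
 * hardware queue.  blk_mq_alloc_request_hctx() must not sleep in the tag
 * allocator, so the caller passes BLK_MQ_REQ_NOWAIT; connect commands are
 * normally taken from the reserved tag pool, which the reworked
 * WARN_ON_ONCE in this patch also accepts.
 */
static struct request *connect_cmd_alloc(struct request_queue *q,
					 unsigned int hctx_idx)
{
	struct request *rq;

	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
				       BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
				       hctx_idx);
	if (IS_ERR(rq))
		return rq;	/* e.g. ERR_PTR(-EWOULDBLOCK): no tag free */

	/* driver-specific connect command setup would follow here */
	return rq;
}

Because the tag allocator is not allowed to sleep here (see the comment in the hunk below), such a caller has to be prepared for an ERR_PTR return and retry or fail the connect attempt itself.
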
block/blk-mq.c

@@ -351,21 +351,13 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
-	unsigned int tag;
-	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
+	unsigned int tag;
 
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
 
-	if (likely(!data->ctx)) {
-		data->ctx = blk_mq_get_ctx(q);
-		clear_ctx_on_error = true;
-	}
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-						data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
@@ -381,17 +373,16 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
-	} else {
-		blk_mq_tag_busy(data->hctx);
 	}
 
+	data->ctx = blk_mq_get_ctx(q);
+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+		blk_mq_tag_busy(data->hctx);
+
 	tag = blk_mq_get_tag(data);
-	if (tag == BLK_MQ_NO_TAG) {
-		if (clear_ctx_on_error)
-			data->ctx = NULL;
+	if (tag == BLK_MQ_NO_TAG)
 		return NULL;
-	}
 
 	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
 }
@@ -431,17 +422,22 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.flags		= flags,
 		.cmd_flags	= op,
 	};
-	struct request *rq;
+	u64 alloc_time_ns = 0;
 	unsigned int cpu;
+	unsigned int tag;
 	int ret;
 
+	/* alloc_time includes depth and tag waits */
+	if (blk_queue_rq_alloc_time(q))
+		alloc_time_ns = ktime_get_ns();
+
 	/*
 	 * If the tag allocator sleeps we could get an allocation for a
 	 * different hardware context.  No need to complicate the low level
 	 * allocator for this for the rare use case of a command tied to
 	 * a specific queue.
	 */
-	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
+	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
 		return ERR_PTR(-EINVAL);
 
 	if (hctx_idx >= q->nr_hw_queues)
@@ -462,11 +458,17 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
+	if (q->elevator)
+		data.flags |= BLK_MQ_REQ_INTERNAL;
+	else
+		blk_mq_tag_busy(data.hctx);
+
 	ret = -EWOULDBLOCK;
-	rq = __blk_mq_alloc_request(&data);
-	if (!rq)
+	tag = blk_mq_get_tag(&data);
+	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	return rq;
+	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
+
 out_queue_exit:
 	blk_queue_exit(q);
 	return ERR_PTR(ret);