Commit cc6e3b10 authored by Mike Christie, committed by Jens Axboe

block: prepare mq request creation to use REQ_OPs

This patch modifies the blk-mq request creation code to use
separate variables for the operation and flags, because in the
next patches the struct request users will be converted, as was
done for bios.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent e6a40b09
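The core of the change is in blk_mq_rq_ctx_init(): instead of OR-ing the
whole rw_flags word into rq->cmd_flags, it calls req_set_op_attrs(), the
helper introduced earlier in this series, which stores the operation
number and the attribute flags separately in cmd_flags. A minimal sketch
of the idea, assuming the transitional layout where the op is kept in the
top bits of a widened cmd_flags (not the exact kernel macro):

    /*
     * Simplified sketch, not the exact kernel code: the REQ_OP number
     * occupies the top REQ_OP_BITS of cmd_flags, while the attribute
     * flags (REQ_SYNC, REQ_IO_STAT, ...) keep using the low bits.
     * Assumes kernel types (u64) and a struct request with cmd_flags.
     */
    #define REQ_OP_BITS   3
    #define REQ_OP_SHIFT  (8 * sizeof(u64) - REQ_OP_BITS)

    #define req_set_op_attrs(req, op, flags) do {                   \
            (req)->cmd_flags &= ((u64)1 << REQ_OP_SHIFT) - 1;       \
            (req)->cmd_flags |= (u64)(op) << REQ_OP_SHIFT;          \
            (req)->cmd_flags |= (flags);                            \
    } while (0)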
block/blk-mq.c
@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                               struct request *rq, unsigned int rw_flags)
+                               struct request *rq, int op,
+                               unsigned int op_flags)
 {
         if (blk_queue_io_stat(q))
-                rw_flags |= REQ_IO_STAT;
+                op_flags |= REQ_IO_STAT;
 
         INIT_LIST_HEAD(&rq->queuelist);
         /* csd/requeue_work/fifo_time is initialized before use */
         rq->q = q;
         rq->mq_ctx = ctx;
-        rq->cmd_flags |= rw_flags;
+        req_set_op_attrs(rq, op, op_flags);
         /* do not touch atomic flags, it needs atomic ops against the timer */
         rq->cpu = -1;
         INIT_HLIST_NODE(&rq->hash);
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
         rq->end_io_data = NULL;
         rq->next_rq = NULL;
 
-        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+        ctx->rq_dispatched[rw_is_sync(op | op_flags)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 {
         struct request *rq;
         unsigned int tag;
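Note how the rq_dispatched accounting recombines the two values as
rw_is_sync(op | op_flags). During this transition op is still just
bio_data_dir(), i.e. the old REQ_WRITE bit, so OR-ing the pair back
together reproduces the value the old rw_flags argument carried.
rw_is_sync() itself is untouched by this patch; at the time it was
defined (in blkdev.h) along these lines:

    /* Reads are always treated as synchronous; writes only with REQ_SYNC. */
    static inline bool rw_is_sync(unsigned int rw_flags)
    {
            return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
    }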
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
         }
 
         rq->tag = tag;
-        blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+        blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
         return rq;
 }
 
@@ -246,7 +247,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
         blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
-        rq = __blk_mq_alloc_request(&alloc_data, rw);
+        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
         if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
                 __blk_mq_run_hw_queue(hctx);
                 blk_mq_put_ctx(ctx);
@@ -254,7 +255,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                 ctx = blk_mq_get_ctx(q);
                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
                 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-                rq = __blk_mq_alloc_request(&alloc_data, rw);
+                rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
                 ctx = alloc_data.ctx;
         }
         blk_mq_put_ctx(ctx);
@@ -1169,7 +1170,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
         struct request *rq;
-        int rw = bio_data_dir(bio);
+        int op = bio_data_dir(bio);
+        int op_flags = 0;
         struct blk_mq_alloc_data alloc_data;
 
         blk_queue_enter_live(q);
@@ -1177,20 +1179,20 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
         if (rw_is_sync(bio->bi_rw))
-                rw |= REQ_SYNC;
+                op_flags |= REQ_SYNC;
 
-        trace_block_getrq(q, bio, rw);
+        trace_block_getrq(q, bio, op);
         blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
-        rq = __blk_mq_alloc_request(&alloc_data, rw);
+        rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
         if (unlikely(!rq)) {
                 __blk_mq_run_hw_queue(hctx);
                 blk_mq_put_ctx(ctx);
-                trace_block_sleeprq(q, bio, rw);
+                trace_block_sleeprq(q, bio, op);
 
                 ctx = blk_mq_get_ctx(q);
                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
                 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-                rq = __blk_mq_alloc_request(&alloc_data, rw);
+                rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
                 ctx = alloc_data.ctx;
                 hctx = alloc_data.hctx;
         }
...
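To summarize the resulting calling convention (a sketch using only names
from the diff above): the exported blk_mq_alloc_request() path keeps its
single rw argument and simply passes 0 for op_flags, while the bio-driven
blk_mq_map_request() path now builds the pair explicitly:

    /* External allocation path: signature unchanged, no attribute flags yet. */
    rq = __blk_mq_alloc_request(&alloc_data, rw, 0);

    /* bio-driven path: direction and attributes carried separately. */
    int op = bio_data_dir(bio);          /* READ or WRITE */
    int op_flags = 0;

    if (rw_is_sync(bio->bi_rw))
            op_flags |= REQ_SYNC;        /* attribute, no longer mixed into op */

    rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);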