Commit 72a0a36e authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: support at_head insertions for blk_execute_rq

This is needed for proper SG_IO operation as well as various uses of
blk_execute_rq from the SCSI midlayer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 96d2e8b5
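For context, blk_execute_rq() and blk_execute_rq_nowait() already accept an at_head argument; what this commit changes is that the flag is now forwarded into the blk-mq insert path instead of being dropped on mq queues. The sketch below is illustrative only, assuming a hypothetical caller name and omitting any real command payload; blk_get_request(), blk_execute_rq() and blk_put_request() are the existing block-layer calls that feed into the code changed here.

#include <linux/blkdev.h>

/*
 * Illustrative sketch, not part of the patch: a caller in the style of
 * the SG_IO path asking for head-of-queue execution.  The function name
 * and the empty command payload are hypothetical.
 */
static int example_issue_at_head(struct request_queue *q,
				 struct gendisk *disk)
{
	struct request *rq;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/*
	 * at_head = 1: with this commit, blk_execute_rq_nowait() passes
	 * the flag through to blk_mq_insert_request() on mq queues
	 * instead of always inserting at the tail.
	 */
	blk_execute_rq(q, disk, rq, 1);

	blk_put_request(rq);
	return 0;
}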
block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, true);
+		blk_mq_insert_request(q, rq, at_head, true);
 		return;
 	}
block/blk-mq.c
@@ -714,12 +714,15 @@ static void blk_mq_work_fn(struct work_struct *work)
 }
 
 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-				    struct request *rq)
+				    struct request *rq, bool at_head)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
 	trace_block_rq_insert(hctx->queue, rq);
 
-	list_add_tail(&rq->queuelist, &ctx->rq_list);
+	if (at_head)
+		list_add(&rq->queuelist, &ctx->rq_list);
+	else
+		list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
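The at_head branch added above is plain struct list_head manipulation on the per-ctx software queue. A minimal sketch of the distinction, with a made-up function name: list_add() places the entry at the front of ctx->rq_list so it is picked up first at dispatch, while list_add_tail() preserves FIFO order at the back.

#include <linux/list.h>
#include <linux/types.h>

/* Sketch only; the function name is hypothetical. */
static void sketch_sw_queue_insert(struct list_head *rq_list,
				   struct list_head *entry, bool at_head)
{
	if (at_head)
		list_add(entry, rq_list);	/* front: dispatched first */
	else
		list_add_tail(entry, rq_list);	/* back: FIFO order */
}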
@@ -730,7 +733,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 }
 
 void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-		bool run_queue)
+		bool at_head, bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx, *current_ctx;
@@ -749,7 +752,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
 		rq->mq_ctx = ctx;
 	}
 	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq);
+	__blk_mq_insert_request(hctx, rq, at_head);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
@@ -781,7 +784,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
 
 	/* ctx->cpu might be offline */
 	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq);
+	__blk_mq_insert_request(hctx, rq, false);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
@@ -819,7 +822,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		rq->mq_ctx = ctx;
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, false);
 	}
 	spin_unlock(&ctx->lock);
@@ -971,7 +974,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		__blk_mq_free_request(hctx, ctx, rq);
 	else {
 		blk_mq_bio_to_request(rq, bio);
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, false);
 	}
 
 	spin_unlock(&ctx->lock);
include/linux/blk-mq.h
@@ -122,7 +122,8 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request_queue *, struct request *,
+		bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
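Callers of blk_mq_insert_request() have to pick up the widened prototype above. A hedged sketch against the new signature, with a hypothetical surrounding function, showing the argument order (queue, request, at_head, run_queue):

#include <linux/blk-mq.h>

/* Hypothetical caller, shown only to illustrate the new argument order. */
static void example_insert_front_and_run(struct request_queue *q,
					 struct request *rq)
{
	/* Insert at the head of the software queue and kick dispatch. */
	blk_mq_insert_request(q, rq, true, true);
}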