Commit f3552655 authored by Ming Lei, committed by Jens Axboe

block: introduce blk_init_flush and its pair

These two temporary functions are introduced to hold flush
initialization and de-initialization, so that we can
introduce the 'flush queue' more easily in the following patch. Once
the 'flush queue' and its allocation/free functions are ready,
they will be removed for the sake of code readability.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 1bcb1ead
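
The shape of the change is a single init/exit pair that dispatches on
q->mq_ops, so callers no longer care whether the queue is driven by
blk-mq or by the legacy request path. The following standalone C sketch
shows only that dispatch pattern; the struct layout and the
mq_flush_init() stand-in are invented for illustration and are not the
kernel code itself:

#include <stdlib.h>

/* Invented stand-ins for the kernel types, for this sketch only. */
struct request { int unused; };
struct request_queue {
	void *mq_ops;			/* non-NULL: blk-mq driven queue */
	struct request *flush_rq;	/* preallocated flush request */
};

static int mq_flush_init(struct request_queue *q)
{
	return 0;	/* the blk-mq path would set up its flush state here */
}

/* One entry point that picks the right initialization... */
int init_flush(struct request_queue *q)
{
	if (q->mq_ops)
		return mq_flush_init(q);

	q->flush_rq = calloc(1, sizeof(*q->flush_rq));
	return q->flush_rq ? 0 : -1;
}

/* ...and one matching teardown, safe on either path since free(NULL)
 * is a no-op, mirroring kfree(NULL) in the kernel. */
void exit_flush(struct request_queue *q)
{
	free(q->flush_rq);
}

As the hunks below show, blk_init_allocated_queue(), blk_mq_init_queue()
and blk_release_queue() all switch to this pair, so the follow-up patch
can replace the internals with a real 'flush queue' without touching any
caller.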
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -705,8 +705,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (!q)
 		return NULL;
 
-	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
-	if (!q->flush_rq)
+	if (blk_init_flush(q))
 		return NULL;
 
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
@@ -742,7 +741,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	return q;
 
 fail:
-	kfree(q->flush_rq);
+	blk_exit_flush(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -472,7 +472,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-int blk_mq_init_flush(struct request_queue *q)
+static int blk_mq_init_flush(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
@@ -485,3 +485,20 @@ int blk_mq_init_flush(struct request_queue *q)
 		return -ENOMEM;
 	return 0;
 }
+
+int blk_init_flush(struct request_queue *q)
+{
+	if (q->mq_ops)
+		return blk_mq_init_flush(q);
+
+	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!q->flush_rq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void blk_exit_flush(struct request_queue *q)
+{
+	kfree(q->flush_rq);
+}
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1859,7 +1859,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
-	if (blk_mq_init_flush(q))
+	if (blk_init_flush(q))
 		goto err_hw_queues;
 
 	blk_mq_map_swqueue(q);
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,7 +27,6 @@ struct blk_mq_ctx {
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,11 +517,11 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	blk_exit_flush(q);
+
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
 
-	kfree(q->flush_rq);
-
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,6 +22,9 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
+int blk_init_flush(struct request_queue *q);
+void blk_exit_flush(struct request_queue *q);
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
 void blk_exit_rl(struct request_list *rl);