Commit 26a9750a authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

blk-mq: improve the blk_mq_init_allocated_queue interface

Don't return the passed in request_queue but a normal error code, and
drop the elevator_init argument in favor of just calling elevator_init_mq
directly from dm-rq.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Link: https://lore.kernel.org/r/20210602065345.355274-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cdb14e0f
...@@ -3115,21 +3115,18 @@ void blk_mq_release(struct request_queue *q) ...@@ -3115,21 +3115,18 @@ void blk_mq_release(struct request_queue *q)
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
void *queuedata) void *queuedata)
{ {
struct request_queue *uninit_q, *q; struct request_queue *q;
int ret;
uninit_q = blk_alloc_queue(set->numa_node); q = blk_alloc_queue(set->numa_node);
if (!uninit_q) if (!q)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
uninit_q->queuedata = queuedata; q->queuedata = queuedata;
ret = blk_mq_init_allocated_queue(set, q);
/* if (ret) {
* Initialize the queue without an elevator. device_add_disk() will do blk_cleanup_queue(q);
* the initialization. return ERR_PTR(ret);
*/ }
q = blk_mq_init_allocated_queue(set, uninit_q, false);
if (IS_ERR(q))
blk_cleanup_queue(uninit_q);
return q; return q;
} }
EXPORT_SYMBOL_GPL(blk_mq_init_queue_data); EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
...@@ -3273,9 +3270,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, ...@@ -3273,9 +3270,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock); mutex_unlock(&q->sysfs_lock);
} }
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q, struct request_queue *q)
bool elevator_init)
{ {
/* mark the queue as mq asap */ /* mark the queue as mq asap */
q->mq_ops = set->ops; q->mq_ops = set->ops;
...@@ -3325,11 +3321,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, ...@@ -3325,11 +3321,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
blk_mq_init_cpu_queues(q, set->nr_hw_queues); blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q); blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q); blk_mq_map_swqueue(q);
return 0;
if (elevator_init)
elevator_init_mq(q);
return q;
err_hctxs: err_hctxs:
kfree(q->queue_hw_ctx); kfree(q->queue_hw_ctx);
...@@ -3340,7 +3332,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, ...@@ -3340,7 +3332,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->poll_cb = NULL; q->poll_cb = NULL;
err_exit: err_exit:
q->mq_ops = NULL; q->mq_ops = NULL;
return ERR_PTR(-ENOMEM); return -ENOMEM;
} }
EXPORT_SYMBOL(blk_mq_init_allocated_queue); EXPORT_SYMBOL(blk_mq_init_allocated_queue);
......
...@@ -192,7 +192,6 @@ void blk_account_io_done(struct request *req, u64 now); ...@@ -192,7 +192,6 @@ void blk_account_io_done(struct request *req, u64 now);
void blk_insert_flush(struct request *rq); void blk_insert_flush(struct request *rq);
void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q, int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e); struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *); void __elevator_exit(struct request_queue *, struct elevator_queue *);
......
...@@ -693,7 +693,7 @@ void elevator_init_mq(struct request_queue *q) ...@@ -693,7 +693,7 @@ void elevator_init_mq(struct request_queue *q)
elevator_put(e); elevator_put(e);
} }
} }
EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
/* /*
* switch to new_e io scheduler. be careful not to introduce deadlocks - * switch to new_e io scheduler. be careful not to introduce deadlocks -
......
...@@ -530,7 +530,6 @@ static const struct blk_mq_ops dm_mq_ops = { ...@@ -530,7 +530,6 @@ static const struct blk_mq_ops dm_mq_ops = {
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{ {
struct request_queue *q;
struct dm_target *immutable_tgt; struct dm_target *immutable_tgt;
int err; int err;
...@@ -557,12 +556,10 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) ...@@ -557,12 +556,10 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
if (err) if (err)
goto out_kfree_tag_set; goto out_kfree_tag_set;
q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true); err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
if (IS_ERR(q)) { if (err)
err = PTR_ERR(q);
goto out_tag_set; goto out_tag_set;
} elevator_init_mq(md->queue);
return 0; return 0;
out_tag_set: out_tag_set:
......
...@@ -429,9 +429,8 @@ enum { ...@@ -429,9 +429,8 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
void *queuedata); void *queuedata);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q, struct request_queue *q);
bool elevator_init);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, const struct blk_mq_ops *ops,
unsigned int queue_depth, unsigned int queue_depth,
......
...@@ -120,6 +120,7 @@ extern void elv_merged_request(struct request_queue *, struct request *, ...@@ -120,6 +120,7 @@ extern void elv_merged_request(struct request_queue *, struct request *,
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *);
void elevator_init_mq(struct request_queue *q);
/* /*
* io scheduler registration * io scheduler registration
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment