Commit f9cd4bfe authored by Jens Axboe

block: get rid of MQ scheduler ops union

This is a remnant of when we had ops for both SQ and MQ
schedulers. Now it's just MQ, so get rid of the union.
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a1ce35fa
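
In practice the change is mechanical: struct elevator_type used to wrap the MQ callbacks in a one-member union, and every caller reached them through an extra .mq step. With the union gone, ops is the struct elevator_mq_ops itself. A minimal before/after sketch (field and callback names taken from the diff below):

        /* Before: callbacks hidden behind a one-member union. */
        union {
                struct elevator_mq_ops mq;
        } ops;
        ...
        if (e->type->ops.mq.dispatch_request)
                rq = e->type->ops.mq.dispatch_request(hctx);

        /* After: ops is the MQ ops struct directly. */
        struct elevator_mq_ops ops;
        ...
        if (e->type->ops.dispatch_request)
                rq = e->type->ops.dispatch_request(hctx);
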
@@ -5724,7 +5724,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 };
 static struct elevator_type iosched_bfq_mq = {
-	.ops.mq = {
+	.ops = {
 		.limit_depth = bfq_limit_depth,
 		.prepare_request = bfq_prepare_request,
 		.requeue_request = bfq_finish_requeue_request,
...
@@ -48,8 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (icq->flags & ICQ_EXITED)
 		return;
-	if (et->ops.mq.exit_icq)
-		et->ops.mq.exit_icq(icq);
+	if (et->ops.exit_icq)
+		et->ops.exit_icq(icq);
 	icq->flags |= ICQ_EXITED;
 }
@@ -396,8 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
 		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.mq.init_icq)
-			et->ops.mq.init_icq(icq);
+		if (et->ops.init_icq)
+			et->ops.init_icq(icq);
 	} else {
 		kmem_cache_free(et->icq_cache, icq);
 		icq = ioc_lookup_icq(ioc, q);
...
@@ -85,14 +85,13 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 	do {
 		struct request *rq;
-		if (e->type->ops.mq.has_work &&
-		    !e->type->ops.mq.has_work(hctx))
+		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
 			break;
 		if (!blk_mq_get_dispatch_budget(hctx))
 			break;
-		rq = e->type->ops.mq.dispatch_request(hctx);
+		rq = e->type->ops.dispatch_request(hctx);
 		if (!rq) {
 			blk_mq_put_dispatch_budget(hctx);
 			break;
@@ -163,7 +162,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
 	struct elevator_queue *e = q->elevator;
-	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
 	LIST_HEAD(rq_list);
 	/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -314,9 +313,9 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	bool ret = false;
-	if (e && e->type->ops.mq.bio_merge) {
+	if (e && e->type->ops.bio_merge) {
 		blk_mq_put_ctx(ctx);
-		return e->type->ops.mq.bio_merge(hctx, bio);
+		return e->type->ops.bio_merge(hctx, bio);
 	}
 	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
@@ -380,11 +379,11 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
 		goto run;
-	if (e && e->type->ops.mq.insert_requests) {
+	if (e && e->type->ops.insert_requests) {
 		LIST_HEAD(list);
 		list_add(&rq->queuelist, &list);
-		e->type->ops.mq.insert_requests(hctx, &list, at_head);
+		e->type->ops.insert_requests(hctx, &list, at_head);
 	} else {
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
@@ -403,8 +402,8 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	struct elevator_queue *e = hctx->queue->elevator;
-	if (e && e->type->ops.mq.insert_requests)
-		e->type->ops.mq.insert_requests(hctx, list, false);
+	if (e && e->type->ops.insert_requests)
+		e->type->ops.insert_requests(hctx, list, false);
 	else {
 		/*
 		 * try to issue requests directly if the hw queue isn't
@@ -489,15 +488,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 			goto err;
 	}
-	ret = e->ops.mq.init_sched(q, e);
+	ret = e->ops.init_sched(q, e);
 	if (ret)
 		goto err;
 	blk_mq_debugfs_register_sched(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
-		if (e->ops.mq.init_hctx) {
-			ret = e->ops.mq.init_hctx(hctx, i);
+		if (e->ops.init_hctx) {
+			ret = e->ops.init_hctx(hctx, i);
 			if (ret) {
 				eq = q->elevator;
 				blk_mq_exit_sched(q, eq);
@@ -523,14 +522,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	queue_for_each_hw_ctx(q, hctx, i) {
 		blk_mq_debugfs_unregister_sched_hctx(hctx);
-		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
-			e->type->ops.mq.exit_hctx(hctx, i);
+		if (e->type->ops.exit_hctx && hctx->sched_data) {
+			e->type->ops.exit_hctx(hctx, i);
 			hctx->sched_data = NULL;
 		}
 	}
 	blk_mq_debugfs_unregister_sched(q);
-	if (e->type->ops.mq.exit_sched)
-		e->type->ops.mq.exit_sched(e);
+	if (e->type->ops.exit_sched)
+		e->type->ops.exit_sched(e);
 	blk_mq_sched_tags_teardown(q);
 	q->elevator = NULL;
 }
@@ -43,8 +43,8 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 {
 	struct elevator_queue *e = q->elevator;
-	if (e && e->type->ops.mq.allow_merge)
-		return e->type->ops.mq.allow_merge(q, rq, bio);
+	if (e && e->type->ops.allow_merge)
+		return e->type->ops.allow_merge(q, rq, bio);
 	return true;
 }
@@ -53,8 +53,8 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
 	struct elevator_queue *e = rq->q->elevator;
-	if (e && e->type->ops.mq.completed_request)
-		e->type->ops.mq.completed_request(rq, now);
+	if (e && e->type->ops.completed_request)
+		e->type->ops.completed_request(rq, now);
 }
 static inline void blk_mq_sched_started_request(struct request *rq)
@@ -62,8 +62,8 @@ static inline void blk_mq_sched_started_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
-	if (e && e->type->ops.mq.started_request)
-		e->type->ops.mq.started_request(rq);
+	if (e && e->type->ops.started_request)
+		e->type->ops.started_request(rq);
 }
 static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +71,16 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
-	if (e && e->type->ops.mq.requeue_request)
-		e->type->ops.mq.requeue_request(rq);
+	if (e && e->type->ops.requeue_request)
+		e->type->ops.requeue_request(rq);
 }
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct elevator_queue *e = hctx->queue->elevator;
-	if (e && e->type->ops.mq.has_work)
-		return e->type->ops.mq.has_work(hctx);
+	if (e && e->type->ops.has_work)
+		return e->type->ops.has_work(hctx);
 	return false;
 }
...
@@ -363,9 +363,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
-		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+		if (!op_is_flush(op) && e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
-			e->type->ops.mq.limit_depth(op, data);
+			e->type->ops.limit_depth(op, data);
 	} else {
 		blk_mq_tag_busy(data->hctx);
 	}
@@ -383,11 +383,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	rq = blk_mq_rq_ctx_init(data, tag, op);
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
-		if (e && e->type->ops.mq.prepare_request) {
+		if (e && e->type->ops.prepare_request) {
 			if (e->type->icq_cache && rq_ioc(bio))
 				blk_mq_sched_assign_ioc(rq, bio);
-			e->type->ops.mq.prepare_request(rq, bio);
+			e->type->ops.prepare_request(rq, bio);
 			rq->rq_flags |= RQF_ELVPRIV;
 		}
 	}
@@ -491,8 +491,8 @@ void blk_mq_free_request(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	if (rq->rq_flags & RQF_ELVPRIV) {
-		if (e && e->type->ops.mq.finish_request)
-			e->type->ops.mq.finish_request(rq);
+		if (e && e->type->ops.finish_request)
+			e->type->ops.finish_request(rq);
 		if (rq->elv.icq) {
 			put_io_context(rq->elv.icq->ioc);
 			rq->elv.icq = NULL;
...
@@ -61,8 +61,8 @@ static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
-	if (e->type->ops.mq.allow_merge)
-		return e->type->ops.mq.allow_merge(q, rq, bio);
+	if (e->type->ops.allow_merge)
+		return e->type->ops.allow_merge(q, rq, bio);
 	return 1;
 }
@@ -180,7 +180,7 @@ static void elevator_release(struct kobject *kobj)
 void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
 	mutex_lock(&e->sysfs_lock);
-	if (e->type->ops.mq.exit_sched)
+	if (e->type->ops.exit_sched)
 		blk_mq_exit_sched(q, e);
 	mutex_unlock(&e->sysfs_lock);
@@ -329,8 +329,8 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
 		return ELEVATOR_BACK_MERGE;
 	}
-	if (e->type->ops.mq.request_merge)
-		return e->type->ops.mq.request_merge(q, req, bio);
+	if (e->type->ops.request_merge)
+		return e->type->ops.request_merge(q, req, bio);
 	return ELEVATOR_NO_MERGE;
 }
@@ -381,8 +381,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq,
 {
 	struct elevator_queue *e = q->elevator;
-	if (e->type->ops.mq.request_merged)
-		e->type->ops.mq.request_merged(q, rq, type);
+	if (e->type->ops.request_merged)
+		e->type->ops.request_merged(q, rq, type);
 	if (type == ELEVATOR_BACK_MERGE)
 		elv_rqhash_reposition(q, rq);
@@ -396,8 +396,8 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 	struct elevator_queue *e = q->elevator;
 	bool next_sorted = false;
-	if (e->type->ops.mq.requests_merged)
-		e->type->ops.mq.requests_merged(q, rq, next);
+	if (e->type->ops.requests_merged)
+		e->type->ops.requests_merged(q, rq, next);
 	elv_rqhash_reposition(q, rq);
@@ -413,8 +413,8 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
-	if (e->type->ops.mq.next_request)
-		return e->type->ops.mq.next_request(q, rq);
+	if (e->type->ops.next_request)
+		return e->type->ops.next_request(q, rq);
 	return NULL;
 }
@@ -423,8 +423,8 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
-	if (e->type->ops.mq.former_request)
-		return e->type->ops.mq.former_request(q, rq);
+	if (e->type->ops.former_request)
+		return e->type->ops.former_request(q, rq);
 	return NULL;
 }
...
@@ -1017,7 +1017,7 @@ static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
 #endif
 static struct elevator_type kyber_sched = {
-	.ops.mq = {
+	.ops = {
 		.init_sched = kyber_init_sched,
 		.exit_sched = kyber_exit_sched,
 		.init_hctx = kyber_init_hctx,
...
@@ -761,7 +761,7 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 #endif
 static struct elevator_type mq_deadline = {
-	.ops.mq = {
+	.ops = {
 		.insert_requests = dd_insert_requests,
 		.dispatch_request = dd_dispatch_request,
 		.prepare_request = dd_prepare_request,
...
@@ -69,9 +69,7 @@ struct elevator_type
 	struct kmem_cache *icq_cache;
 	/* fields provided by elevator implementation */
-	union {
-		struct elevator_mq_ops mq;
-	} ops;
+	struct elevator_mq_ops ops;
 	size_t icq_size;	/* see iocontext.h */
 	size_t icq_align;	/* ditto */
...