Commit b2097bd2, authored by Bart Van Assche, committed by Jens Axboe

block: mq-deadline: Reduce lock contention

blk_mq_free_requests() calls dd_finish_request() indirectly. Prevent
nested locking of dd->lock and dd->zone_lock by moving the code for
freeing requests.
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20230517174230.897144-9-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3b463cbe
...@@ -757,7 +757,7 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio, ...@@ -757,7 +757,7 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
* add rq to rbtree and fifo * add rq to rbtree and fifo
*/ */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_insert_t flags) blk_insert_t flags, struct list_head *free)
{ {
struct request_queue *q = hctx->queue; struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
...@@ -766,7 +766,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, ...@@ -766,7 +766,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
struct dd_per_prio *per_prio; struct dd_per_prio *per_prio;
enum dd_prio prio; enum dd_prio prio;
LIST_HEAD(free);
lockdep_assert_held(&dd->lock); lockdep_assert_held(&dd->lock);
...@@ -783,10 +782,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, ...@@ -783,10 +782,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
rq->elv.priv[0] = (void *)(uintptr_t)1; rq->elv.priv[0] = (void *)(uintptr_t)1;
} }
if (blk_mq_sched_try_insert_merge(q, rq, &free)) { if (blk_mq_sched_try_insert_merge(q, rq, free))
blk_mq_free_requests(&free);
return; return;
}
trace_block_rq_insert(rq); trace_block_rq_insert(rq);
...@@ -819,6 +816,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, ...@@ -819,6 +816,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
{ {
struct request_queue *q = hctx->queue; struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
LIST_HEAD(free);
spin_lock(&dd->lock); spin_lock(&dd->lock);
while (!list_empty(list)) { while (!list_empty(list)) {
...@@ -826,9 +824,11 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, ...@@ -826,9 +824,11 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
rq = list_first_entry(list, struct request, queuelist); rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
dd_insert_request(hctx, rq, flags); dd_insert_request(hctx, rq, flags, &free);
} }
spin_unlock(&dd->lock); spin_unlock(&dd->lock);
blk_mq_free_requests(&free);
} }
/* Callback from inside blk_mq_rq_ctx_init(). */ /* Callback from inside blk_mq_rq_ctx_init(). */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment