Commit 82c22947 authored by David Jeffery's avatar David Jeffery Committed by Jens Axboe

blk-mq: avoid double ->queue_rq() because of early timeout

David Jeffery found one double ->queue_rq() issue, so far it can
be triggered in VM use case because of long vmexit latency or preempt
latency of vCPU pthread or long page fault in vCPU pthread, then block
IO req could be timed out before queuing the request to hardware but after
calling blk_mq_start_request() during ->queue_rq(), then timeout handler
may handle it by requeue, then double ->queue_rq() is caused, and kernel
panic.

So far, it is driver's responsibility to cover the race between timeout
and completion, so it seems supposed to be solved in driver in theory,
given driver has enough knowledge.

But it is really a common problem: lots of drivers could have a similar
issue, and it could be hard to fix all affected drivers — it isn't even easy
for a driver to handle the race on its own. So David suggests this patch,
which solves the issue by draining in-progress ->queue_rq() calls.

Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Cc: virtualization@lists.linux-foundation.org
Cc: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: David Jeffery <djeffery@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20221026051957.358818-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 95465318
@@ -1523,7 +1523,13 @@ static void blk_mq_rq_timed_out(struct request *req)
 	blk_add_timer(req);
 }
 
-static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
+struct blk_expired_data {
+	bool has_timedout_rq;
+	unsigned long next;
+	unsigned long timeout_start;
+};
+
+static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
 {
 	unsigned long deadline;
@@ -1533,13 +1539,13 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 		return false;
 
 	deadline = READ_ONCE(rq->deadline);
-	if (time_after_eq(jiffies, deadline))
+	if (time_after_eq(expired->timeout_start, deadline))
 		return true;
 
-	if (*next == 0)
-		*next = deadline;
-	else if (time_after(*next, deadline))
-		*next = deadline;
+	if (expired->next == 0)
+		expired->next = deadline;
+	else if (time_after(expired->next, deadline))
+		expired->next = deadline;
 	return false;
 }
@@ -1555,7 +1561,7 @@ void blk_mq_put_rq_ref(struct request *rq)
 static bool blk_mq_check_expired(struct request *rq, void *priv)
 {
-	unsigned long *next = priv;
+	struct blk_expired_data *expired = priv;
 
 	/*
 	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
@@ -1564,7 +1570,18 @@ static bool blk_mq_check_expired(struct request *rq, void *priv)
 	 * it was completed and reallocated as a new request after returning
 	 * from blk_mq_check_expired().
 	 */
-	if (blk_mq_req_expired(rq, next))
+	if (blk_mq_req_expired(rq, expired)) {
+		expired->has_timedout_rq = true;
+		return false;
+	}
+	return true;
+}
+
+static bool blk_mq_handle_expired(struct request *rq, void *priv)
+{
+	struct blk_expired_data *expired = priv;
+
+	if (blk_mq_req_expired(rq, expired))
 		blk_mq_rq_timed_out(rq);
 	return true;
 }
@@ -1573,7 +1590,9 @@ static void blk_mq_timeout_work(struct work_struct *work)
 {
 	struct request_queue *q =
 		container_of(work, struct request_queue, timeout_work);
-	unsigned long next = 0;
+	struct blk_expired_data expired = {
+		.timeout_start = jiffies,
+	};
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
@@ -1593,10 +1612,23 @@ static void blk_mq_timeout_work(struct work_struct *work)
 	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
 
-	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
+	/* check if there is any timed-out request */
+	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
+	if (expired.has_timedout_rq) {
+		/*
+		 * Before walking tags, we must ensure any submit started
+		 * before the current time has finished. Since the submit
+		 * uses srcu or rcu, wait for a synchronization point to
+		 * ensure all running submits have finished
+		 */
+		blk_mq_wait_quiesce_done(q);
+
+		expired.next = 0;
+		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
+	}
 
-	if (next != 0) {
-		mod_timer(&q->timeout, next);
+	if (expired.next != 0) {
+		mod_timer(&q->timeout, expired.next);
 	} else {
 		/*
 		 * Request timeouts are handled as a forward rolling timer. If
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment