Commit 4e9b6f20 authored by Bart Van Assche, committed by Jens Axboe

block: Fix a race between blk_cleanup_queue() and timeout handling

Make sure that if the timeout timer fires after a queue has been
marked "dying" that the affected requests are finished.
Reported-by: chenxiang (M) <chenxiang66@hisilicon.com>
Fixes: commit 287922eb ("block: defer timeouts to a workqueue")
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Tested-by: chenxiang (M) <chenxiang66@hisilicon.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4d740bc9
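The fix relies on a two-step teardown in blk_sync_queue(): stop the timer so it cannot queue new timeout work, then synchronously cancel any work the timer already queued. The following is a minimal, hypothetical sketch of that ordering only, not code from the patch: every demo_* name is invented, and it assumes the pre-4.15 setup_timer()/unsigned long timer API that the patched kernel still uses.

/*
 * Illustrative sketch only (invented demo_* names): a timer that defers
 * its handling to a workqueue, and a teardown path that mirrors the
 * del_timer_sync() + cancel_work_sync() ordering used by blk_sync_queue().
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_queue {
	struct timer_list timeout;
	struct work_struct timeout_work;
};

static struct demo_queue demo;

static void demo_timeout_work(struct work_struct *work)
{
	/* Process context: this is where timed-out requests would be finished. */
	pr_info("demo: timeout work ran\n");
}

static void demo_timer_fn(unsigned long data)
{
	struct demo_queue *q = (struct demo_queue *)data;

	/* Like blk_rq_timed_out_timer(), defer the real handling to a workqueue. */
	schedule_work(&q->timeout_work);
}

static int __init demo_init(void)
{
	setup_timer(&demo.timeout, demo_timer_fn, (unsigned long)&demo);
	INIT_WORK(&demo.timeout_work, demo_timeout_work);
	mod_timer(&demo.timeout, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * Teardown order matters: del_timer_sync() ensures the timer cannot
	 * queue new work, and cancel_work_sync() waits for work the timer
	 * already queued.  Stopping only the timer would leave a pending
	 * work item free to run after the module is unloaded -- the kind of
	 * race the patch below closes for request queues.
	 */
	del_timer_sync(&demo.timeout);
	cancel_work_sync(&demo.timeout_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because blk_sync_queue() now waits for the work item in exactly this way, the blk-timeout.c hunks below can drop the blk_queue_enter()/blk_queue_exit() pair whose early return is what left timed-out requests unfinished once the queue was marked "dying".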
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -333,6 +333,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
+	cancel_work_sync(&q->timeout_work);
 
 	if (q->mq_ops) {
 		struct blk_mq_hw_ctx *hctx;
@@ -844,6 +845,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, NULL);
 	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work)
 	struct request *rq, *tmp;
 	int next_set = 0;
 
-	if (blk_queue_enter(q, true))
-		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
-	blk_queue_exit(q);
 }
 
 /**