Commit 8b566edb authored by Bart Van Assche, committed by Martin K. Petersen

scsi: core: Only kick the requeue list if necessary

Instead of running the request queue of each device associated with a host
every 3 ms (BLK_MQ_RESOURCE_DELAY) while host error handling is in
progress, run the request queue after error handling has finished.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20230518193159.1166304-4-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 8bb1c624
...@@ -122,11 +122,9 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) ...@@ -122,11 +122,9 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
WARN_ON_ONCE(true); WARN_ON_ONCE(true);
} }
if (msecs) { blk_mq_requeue_request(rq, false);
blk_mq_requeue_request(rq, false); if (!scsi_host_in_recovery(cmd->device->host))
blk_mq_delay_kick_requeue_list(rq->q, msecs); blk_mq_delay_kick_requeue_list(rq->q, msecs);
} else
blk_mq_requeue_request(rq, true);
} }
/** /**
...@@ -165,7 +163,8 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) ...@@ -165,7 +163,8 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
*/ */
cmd->result = 0; cmd->result = 0;
blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true); blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
!scsi_host_in_recovery(cmd->device->host));
} }
/** /**
...@@ -453,6 +452,7 @@ static void scsi_run_queue(struct request_queue *q) ...@@ -453,6 +452,7 @@ static void scsi_run_queue(struct request_queue *q)
if (!list_empty(&sdev->host->starved_list)) if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host); scsi_starved_list_run(sdev->host);
blk_mq_kick_requeue_list(q);
blk_mq_run_hw_queues(q, false); blk_mq_run_hw_queues(q, false);
} }
...@@ -503,6 +503,9 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) ...@@ -503,6 +503,9 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_run_queue_async(struct scsi_device *sdev) static void scsi_run_queue_async(struct scsi_device *sdev)
{ {
if (scsi_host_in_recovery(sdev->host))
return;
if (scsi_target(sdev)->single_lun || if (scsi_target(sdev)->single_lun ||
!list_empty(&sdev->host->starved_list)) { !list_empty(&sdev->host->starved_list)) {
kblockd_schedule_work(&sdev->requeue_work); kblockd_schedule_work(&sdev->requeue_work);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment