Commit 669f0441 authored by Bart Van Assche's avatar Bart Van Assche Committed by Martin K. Petersen

scsi: srp_transport: Move queuecommand() wait code to SCSI core

Additionally, rename srp_wait_for_queuecommand() into
scsi_wait_for_queuecommand() and add a comment about the queuecommand()
call from scsi_send_eh_cmnd().

Note: this patch changes scsi_internal_device_block from a function that
did not sleep into a function that may sleep. This is fine for all
callers of this function:

* scsi_internal_device_block() is called from the mpt3sas driver while
  that driver holds the ioc->dm_cmds.mutex. This means that the mpt3sas
  driver calls this function from thread context.
* scsi_target_block() is called by __iscsi_block_session() from
  kernel thread context and with IRQs enabled.
* The SRP transport code also calls scsi_target_block() from kernel
  thread context while sleeping is allowed.
* The snic driver also calls scsi_target_block() from a context from
  which sleeping is allowed. The scsi_target_block() call namely occurs
  immediately after a scsi_flush_work() call.

[mkp: s/shost/sdev/]
Signed-off-by: default avatarBart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: default avatarSagi Grimberg <sagi@grimberg.me>
Reviewed-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
Cc: James Bottomley <jejb@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Doug Ledford <dledford@redhat.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent 27c3d768
...@@ -2733,6 +2733,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev, ...@@ -2733,6 +2733,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
} }
EXPORT_SYMBOL_GPL(sdev_evt_send_simple); EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
 *
 * Samples q->request_fn_active under the queue lock. Only meaningful for
 * the legacy (non-blk-mq) request path, hence the WARN_ON_ONCE().
 */
static int scsi_request_fn_active(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int busy;

	WARN_ON_ONCE(sdev->host->use_blk_mq);

	spin_lock_irq(q->queue_lock);
	busy = q->request_fn_active;
	spin_unlock_irq(q->queue_lock);

	return busy;
}
/**
 * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
 * @sdev: SCSI device pointer.
 *
 * Wait until the ongoing shost->hostt->queuecommand() calls that are
 * invoked from scsi_request_fn() have finished. Polls every 20 ms, so
 * this function may sleep; callers must be in process context.
 */
static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
{
	WARN_ON_ONCE(sdev->host->use_blk_mq);

	for (;;) {
		if (scsi_request_fn_active(sdev) == 0)
			break;
		msleep(20);
	}
}
/** /**
* scsi_device_quiesce - Block user issued commands. * scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce. * @sdev: scsi device to quiesce.
...@@ -2817,8 +2850,7 @@ EXPORT_SYMBOL(scsi_target_resume); ...@@ -2817,8 +2850,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* @sdev: device to block * @sdev: device to block
* *
* Block request made by scsi lld's to temporarily stop all * Block request made by scsi lld's to temporarily stop all
* scsi commands on the specified device. Called from interrupt * scsi commands on the specified device. May sleep.
* or normal process context.
* *
* Returns zero if successful or error if not * Returns zero if successful or error if not
* *
...@@ -2827,6 +2859,10 @@ EXPORT_SYMBOL(scsi_target_resume); ...@@ -2827,6 +2859,10 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this * (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables * state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires. * the device with scsi_device_unblock or device_block_tmo fires.
*
* To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
* scsi_internal_device_block() has blocked a SCSI device and also
* remove the rport mutex lock and unlock calls from srp_queuecommand().
*/ */
int int
scsi_internal_device_block(struct scsi_device *sdev) scsi_internal_device_block(struct scsi_device *sdev)
...@@ -2854,6 +2890,7 @@ scsi_internal_device_block(struct scsi_device *sdev) ...@@ -2854,6 +2890,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q); blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
scsi_wait_for_queuecommand(sdev);
} }
return 0; return 0;
......
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/delay.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
...@@ -393,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work) ...@@ -393,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
} }
} }
/**
* scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
* @shost: SCSI host for which to count the number of scsi_request_fn() callers.
*
* To do: add support for scsi-mq in this function.
*/
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
struct request_queue *q;
int request_fn_active = 0;
shost_for_each_device(sdev, shost) {
q = sdev->request_queue;
spin_lock_irq(q->queue_lock);
request_fn_active += q->request_fn_active;
spin_unlock_irq(q->queue_lock);
}
return request_fn_active;
}
/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
{
while (scsi_request_fn_active(shost))
msleep(20);
}
static void __rport_fail_io_fast(struct srp_rport *rport) static void __rport_fail_io_fast(struct srp_rport *rport)
{ {
struct Scsi_Host *shost = rport_to_shost(rport); struct Scsi_Host *shost = rport_to_shost(rport);
...@@ -432,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport) ...@@ -432,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
return; return;
/*
* Call scsi_target_block() to wait for ongoing shost->queuecommand()
* calls before invoking i->f->terminate_rport_io().
*/
scsi_target_block(rport->dev.parent);
scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
/* Involve the LLD if possible to terminate all I/O on the rport. */ /* Involve the LLD if possible to terminate all I/O on the rport. */
i = to_srp_internal(shost->transportt); i = to_srp_internal(shost->transportt);
if (i->f->terminate_rport_io) { if (i->f->terminate_rport_io)
srp_wait_for_queuecommand(shost);
i->f->terminate_rport_io(rport); i->f->terminate_rport_io(rport);
}
} }
/** /**
...@@ -567,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport) ...@@ -567,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
if (res) if (res)
goto out; goto out;
scsi_target_block(&shost->shost_gendev); scsi_target_block(&shost->shost_gendev);
srp_wait_for_queuecommand(shost);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n", pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res); dev_name(&shost->shost_gendev), rport->state, res);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment