Commit 209fae14 authored by Dan Williams

isci: atomic device lookup and reference counting

We hold unsafe references to remote devices that libsas tells us are
disappearing via lldd_dev_gone.  In order to clean this up we need a single
canonical source for device lookups and stable references once a lookup
succeeds.  Toward that end, guarantee that domain_device.lldd_dev is
NULLed as soon as we start the process of stopping a device.  Any code
path that wants to safely look up a remote device must do so through
task->dev->lldd_dev (isci_lookup_device()).
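
For reference, the lookup helper added below takes the reference under
scic_lock and refuses devices that have already been marked gone:

    /* device reference routines must be called under scic_lock */
    static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
    {
        struct isci_remote_device *idev = dev->lldd_dev;

        if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
            kref_get(&idev->kref);
            return idev;
        }
        return NULL;
    }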

For in-flight references taken outside of scic_lock we need reference
counting to ensure that the device is not recycled before we are done
with it.  Simplify device back references to just
scic_sds_request.target_device, which is now the only permissible
internal reference that is maintained relative to the reference count.
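
Call sites outside of scic_lock now follow the pattern below (a sketch
using the variable names from the isci_task_execute_task() hunk that
follows):

    spin_lock_irqsave(&ihost->scic_lock, flags);
    idev = isci_lookup_device(task->dev);   /* NULL if gone, else takes a kref */
    spin_unlock_irqrestore(&ihost->scic_lock, flags);

    /* ... use idev ... */

    isci_put_device(idev);                  /* NULL-safe final put */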

There are two cases where we want new i/o to be treated as
SAS_TASK_UNDELIVERED while the domain_dev->lldd_dev link is still
intact.  Introduce a 'gone' flag to prevent i/o while waiting for libsas
to take action on the port-down event.
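
The port-down notification therefore reduces to marking the device (see
the isci_port_link_down() hunk below); subsequent lookups fail and the
i/o is refused:

    /* libsas will eventually call lldd_dev_gone(); until then fail
     * lookups against this device so new i/o is refused.
     */
    set_bit(IDEV_GONE, &isci_device->flags);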

One 'core' leftover is that we currently call
scic_remote_device_destruct() from isci_remote_device_deconstruct(),
which is called when the 'core' says the device is stopped.  It would be
more natural for the final put to trigger
isci_remote_device_deconstruct(), but that implementation is deferred as
it requires other changes.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 360b03ed
@@ -1327,8 +1327,8 @@ void isci_host_deinit(struct isci_host *ihost)
 		struct isci_remote_device *idev, *d;
 		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
-			isci_remote_device_change_state(idev, isci_stopping);
-			isci_remote_device_stop(ihost, idev);
+			if (test_bit(IDEV_ALLOCATED, &idev->flags))
+				isci_remote_device_stop(ihost, idev);
 		}
 	}
...
@@ -321,8 +321,7 @@ static void isci_port_link_down(struct isci_host *isci_host,
 			dev_dbg(&isci_host->pdev->dev,
 				"%s: isci_device = %p\n",
 				__func__, isci_device);
-			isci_remote_device_change_state(isci_device,
-							isci_stopping);
+			set_bit(IDEV_GONE, &isci_device->flags);
 		}
 	}
 	isci_port_change_state(isci_port, isci_stopping);
...
@@ -94,7 +94,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
 		"%s: isci_device = %p\n", __func__, idev);
 	if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED)
-		isci_remote_device_change_state(idev, isci_stopping);
+		set_bit(IDEV_GONE, &idev->flags);
 	else
 		/* device ready is actually a "not ready for io" state. */
 		isci_remote_device_change_state(idev, isci_ready);
@@ -449,8 +449,10 @@ static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *
 	/* cleanup requests that failed after starting on the port */
 	if (status != SCI_SUCCESS)
 		scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
-	else
+	else {
+		kref_get(&sci_dev_to_idev(sci_dev)->kref);
 		scic_sds_remote_device_increment_request_count(sci_dev);
+	}
 }
 enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic,
@@ -656,6 +658,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
 			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
 			"could not complete\n", __func__, sci_port,
 			sci_dev, sci_req, status);
+	else
+		isci_put_device(sci_dev_to_idev(sci_dev));
 	return status;
 }
@@ -860,23 +864,11 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
 	 * here should go through isci_remote_device_nuke_requests.
 	 * If we hit this condition, we will need a way to complete
 	 * io requests in process */
-	while (!list_empty(&idev->reqs_in_process)) {
-		dev_err(&ihost->pdev->dev,
-			"%s: ** request list not empty! **\n", __func__);
-		BUG();
-	}
+	BUG_ON(!list_empty(&idev->reqs_in_process));
 	scic_remote_device_destruct(&idev->sci);
-	idev->domain_dev->lldd_dev = NULL;
-	idev->domain_dev = NULL;
-	idev->isci_port = NULL;
 	list_del_init(&idev->node);
-	clear_bit(IDEV_START_PENDING, &idev->flags);
-	clear_bit(IDEV_STOP_PENDING, &idev->flags);
-	clear_bit(IDEV_EH, &idev->flags);
-	wake_up(&ihost->eventq);
+	isci_put_device(idev);
 }
 /**
@@ -1314,6 +1306,22 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
 	return idev;
 }
+void isci_remote_device_release(struct kref *kref)
+{
+	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+	struct isci_host *ihost = idev->isci_port->isci_host;
+
+	idev->domain_dev = NULL;
+	idev->isci_port = NULL;
+	clear_bit(IDEV_START_PENDING, &idev->flags);
+	clear_bit(IDEV_STOP_PENDING, &idev->flags);
+	clear_bit(IDEV_GONE, &idev->flags);
+	clear_bit(IDEV_EH, &idev->flags);
+	smp_mb__before_clear_bit();
+	clear_bit(IDEV_ALLOCATED, &idev->flags);
+	wake_up(&ihost->eventq);
+}
 /**
  * isci_remote_device_stop() - This function is called internally to stop the
  *    remote device.
@@ -1330,7 +1338,11 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 	dev_dbg(&ihost->pdev->dev,
 		"%s: isci_device = %p\n", __func__, idev);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+	set_bit(IDEV_GONE, &idev->flags);
 	isci_remote_device_change_state(idev, isci_stopping);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 	/* Kill all outstanding requests. */
 	isci_remote_device_nuke_requests(ihost, idev);
@@ -1342,14 +1354,10 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 	/* Wait for the stop complete callback. */
-	if (status == SCI_SUCCESS) {
+	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+		/* nothing to wait for */;
+	else
 		wait_for_device_stop(ihost, idev);
-		clear_bit(IDEV_ALLOCATED, &idev->flags);
-	}
-	dev_dbg(&ihost->pdev->dev,
-		"%s: idev = %p - after completion wait\n",
-		__func__, idev);
 	return status;
 }
@@ -1416,39 +1424,33 @@ int isci_remote_device_found(struct domain_device *domain_dev)
 	if (!isci_device)
 		return -ENODEV;
+	kref_init(&isci_device->kref);
 	INIT_LIST_HEAD(&isci_device->node);
-	domain_dev->lldd_dev = isci_device;
+	spin_lock_irq(&isci_host->scic_lock);
 	isci_device->domain_dev = domain_dev;
 	isci_device->isci_port = isci_port;
 	isci_remote_device_change_state(isci_device, isci_starting);
-	spin_lock_irq(&isci_host->scic_lock);
 	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
 	set_bit(IDEV_START_PENDING, &isci_device->flags);
 	status = isci_remote_device_construct(isci_port, isci_device);
-	spin_unlock_irq(&isci_host->scic_lock);
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: isci_device = %p\n",
 		__func__, isci_device);
-	if (status != SCI_SUCCESS) {
-		spin_lock_irq(&isci_host->scic_lock);
-		isci_remote_device_deconstruct(
-			isci_host,
-			isci_device
-		);
-		spin_unlock_irq(&isci_host->scic_lock);
-		return -ENODEV;
-	}
+	if (status == SCI_SUCCESS) {
+		/* device came up, advertise it to the world */
+		domain_dev->lldd_dev = isci_device;
+	} else
+		isci_put_device(isci_device);
+	spin_unlock_irq(&isci_host->scic_lock);
 	/* wait for the device ready callback. */
 	wait_for_device_start(isci_host, isci_device);
-	return 0;
+	return status == SCI_SUCCESS ? 0 : -ENODEV;
 }
 /**
  * isci_device_is_reset_pending() - This function will check if there is any
...
@@ -56,6 +56,7 @@
 #ifndef _ISCI_REMOTE_DEVICE_H_
 #define _ISCI_REMOTE_DEVICE_H_
 #include <scsi/libsas.h>
+#include <linux/kref.h>
 #include "scu_remote_node_context.h"
 #include "remote_node_context.h"
 #include "port.h"
@@ -134,7 +135,9 @@ struct isci_remote_device {
 	#define IDEV_STOP_PENDING 1
 	#define IDEV_ALLOCATED 2
 	#define IDEV_EH 3
+	#define IDEV_GONE 4
 	unsigned long flags;
+	struct kref kref;
 	struct isci_port *isci_port;
 	struct domain_device *domain_dev;
 	struct list_head node;
@@ -145,6 +148,26 @@ struct isci_remote_device {
 #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+/* device reference routines must be called under scic_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+	struct isci_remote_device *idev = dev->lldd_dev;
+
+	if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+		kref_get(&idev->kref);
+		return idev;
+	}
+
+	return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+	if (idev)
+		kref_put(&idev->kref, isci_remote_device_release);
+}
 enum sci_status isci_remote_device_stop(struct isci_host *ihost,
 					struct isci_remote_device *idev);
 void isci_remote_device_nuke_requests(struct isci_host *ihost,
...
@@ -2313,7 +2313,7 @@ static void isci_request_set_open_reject_status(
  * none.
  */
 static void isci_request_handle_controller_specific_errors(
-	struct isci_remote_device *isci_device,
+	struct isci_remote_device *idev,
 	struct isci_request *request,
 	struct sas_task *task,
 	enum service_response *response_ptr,
@@ -2353,8 +2353,7 @@ static void isci_request_handle_controller_specific_errors(
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			*status_ptr = SAS_DEVICE_UNKNOWN;
 		else
 			*status_ptr = SAS_ABORTED_TASK;
@@ -2367,8 +2366,7 @@ static void isci_request_handle_controller_specific_errors(
 		/* Task in the target is not done. */
 		*response_ptr = SAS_TASK_UNDELIVERED;
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			*status_ptr = SAS_DEVICE_UNKNOWN;
 		else
 			*status_ptr = SAM_STAT_TASK_ABORTED;
@@ -2399,8 +2397,7 @@ static void isci_request_handle_controller_specific_errors(
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			*status_ptr = SAS_DEVICE_UNKNOWN;
 		else
 			*status_ptr = SAS_ABORTED_TASK;
@@ -2629,7 +2626,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 	struct ssp_response_iu *resp_iu;
 	void *resp_buf;
 	unsigned long task_flags;
-	struct isci_remote_device *isci_device = request->isci_device;
+	struct isci_remote_device *idev = isci_lookup_device(task->dev);
 	enum service_response response = SAS_TASK_UNDELIVERED;
 	enum exec_status status = SAS_ABORTED_TASK;
 	enum isci_request_status request_status;
@@ -2672,9 +2669,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping)
-		    || (isci_device->status == isci_stopped)
-		    )
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2697,8 +2692,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		request->complete_in_target = true;
 		response = SAS_TASK_UNDELIVERED;
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			/* The device has been /is being stopped. Note that
 			 * we ignore the quiesce state, since we are
 			 * concerned about the actual device state.
@@ -2728,8 +2722,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2861,8 +2854,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2873,7 +2865,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 	case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
 		isci_request_handle_controller_specific_errors(
-			isci_device, request, task, &response, &status,
+			idev, request, task, &response, &status,
 			&complete_to_host);
 		break;
@@ -2902,8 +2894,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		/* Fail the I/O so it can be retried. */
 		response = SAS_TASK_UNDELIVERED;
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2926,8 +2917,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2953,8 +2943,10 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 	/* complete the io request to the core. */
 	scic_controller_complete_io(&isci_host->sci,
-				    &isci_device->sci,
+				    request->sci.target_device,
 				    &request->sci);
+	isci_put_device(idev);
 	/* set terminated handle so it cannot be completed or
 	 * terminated again, and to cause any calls into abort
 	 * task to recognize the already completed case.
@@ -3511,7 +3503,6 @@ static enum sci_status isci_io_request_build(
 }
 static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
-						    struct isci_remote_device *idev,
 						    gfp_t gfp_flags)
 {
 	dma_addr_t handle;
@@ -3528,7 +3519,6 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
 	spin_lock_init(&ireq->state_lock);
 	ireq->request_daddr = handle;
 	ireq->isci_host = ihost;
-	ireq->isci_device = idev;
 	ireq->io_request_completion = NULL;
 	ireq->terminated = false;
@@ -3546,12 +3536,11 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
 static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
 						  struct sas_task *task,
-						  struct isci_remote_device *idev,
 						  gfp_t gfp_flags)
 {
 	struct isci_request *ireq;
-	ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
+	ireq = isci_request_alloc_core(ihost, gfp_flags);
 	if (ireq) {
 		ireq->ttype_ptr.io_task_ptr = task;
 		ireq->ttype = io_task;
@@ -3562,12 +3551,11 @@ static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
 					    struct isci_tmf *isci_tmf,
-					    struct isci_remote_device *idev,
 					    gfp_t gfp_flags)
 {
 	struct isci_request *ireq;
-	ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
+	ireq = isci_request_alloc_core(ihost, gfp_flags);
 	if (ireq) {
 		ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
 		ireq->ttype = tmf_task;
@@ -3575,21 +3563,16 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
 	return ireq;
 }
-int isci_request_execute(struct isci_host *ihost, struct sas_task *task,
-			 gfp_t gfp_flags)
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+			 struct sas_task *task, gfp_t gfp_flags)
 {
 	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-	struct scic_sds_remote_device *sci_dev;
-	struct isci_remote_device *idev;
 	struct isci_request *ireq;
 	unsigned long flags;
 	int ret = 0;
-	idev = task->dev->lldd_dev;
-	sci_dev = &idev->sci;
 	/* do common allocation and init of request object. */
-	ireq = isci_request_alloc_io(ihost, task, idev, gfp_flags);
+	ireq = isci_request_alloc_io(ihost, task, gfp_flags);
 	if (!ireq)
 		goto out;
@@ -3605,8 +3588,7 @@ int isci_request_execute(struct isci_host *ihost, struct sas_task *task,
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 	/* send the request, let the core assign the IO TAG. */
-	status = scic_controller_start_io(&ihost->sci, sci_dev,
-					  &ireq->sci,
+	status = scic_controller_start_io(&ihost->sci, &idev->sci, &ireq->sci,
 					  SCI_CONTROLLER_INVALID_IO_TAG);
 	if (status != SCI_SUCCESS &&
 	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
...
@@ -285,7 +285,6 @@ struct isci_request {
 		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
 	} ttype_ptr;
 	struct isci_host *isci_host;
-	struct isci_remote_device *isci_device;
 	/* For use in the requests_to_{complete|abort} lists: */
 	struct list_head completed_node;
 	/* For use in the reqs_in_process list: */
@@ -681,12 +680,10 @@ static inline void isci_request_free(struct isci_host *isci_host,
 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
 					    struct isci_tmf *isci_tmf,
-					    struct isci_remote_device *idev,
 					    gfp_t gfp_flags);
-int isci_request_execute(struct isci_host *isci_host,
-			 struct sas_task *task,
-			 gfp_t gfp_flags);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+			 struct sas_task *task, gfp_t gfp_flags);
 /**
  * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
...
@@ -213,11 +213,10 @@ int isci_task_send_lu_reset_sata(
 	/* Send the soft reset to the target */
 	#define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
-	isci_task_build_tmf(&tmf, isci_device, isci_tmf_sata_srst_high,
-			    NULL, NULL
-			    );
+	isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
-	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_SRST_TIMEOUT_MS);
+	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+				    ISCI_SRST_TIMEOUT_MS);
 	if (ret != TMF_RESP_FUNC_COMPLETE) {
 		dev_warn(&isci_host->pdev->dev,
...
@@ -146,7 +146,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
 	struct isci_host *ihost = dev_to_ihost(task->dev);
-	struct isci_remote_device *device;
+	struct isci_remote_device *idev;
 	unsigned long flags;
 	int ret;
 	enum sci_status status;
@@ -166,11 +166,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 	dev_dbg(&ihost->pdev->dev,
 		"task = %p, num = %d; dev = %p; cmd = %p\n",
 		task, num, task->dev, task->uldd_task);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(task->dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-	device = task->dev->lldd_dev;
-	if (device)
-		device_status = device->status;
+	if (idev)
+		device_status = idev->status;
 	else
 		device_status = isci_freed;
@@ -188,7 +189,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			__func__,
 			task,
 			isci_host_get_state(ihost),
-			device,
+			idev,
 			device_status);
 		if (device_status == isci_ready) {
@@ -225,7 +226,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			spin_unlock_irqrestore(&task->task_state_lock, flags);
 			/* build and send the request. */
-			status = isci_request_execute(ihost, task, gfp_flags);
+			status = isci_request_execute(ihost, idev, task, gfp_flags);
 			if (status != SCI_SUCCESS) {
@@ -248,33 +249,31 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 				}
 			}
 		}
+		isci_put_device(idev);
 	}
 	return 0;
 }
 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+						     struct isci_remote_device *idev,
 						     struct isci_tmf *isci_tmf)
 {
-	struct scic_sds_remote_device *sci_dev;
 	enum sci_status status = SCI_FAILURE;
 	struct isci_request *ireq = NULL;
-	struct isci_remote_device *idev;
 	struct domain_device *dev;
 	dev_dbg(&ihost->pdev->dev,
 		"%s: isci_tmf = %p\n", __func__, isci_tmf);
-	idev = isci_tmf->device;
-	sci_dev = &idev->sci;
 	dev = idev->domain_dev;
 	/* do common allocation and init of request object. */
-	ireq = isci_request_alloc_tmf(ihost, isci_tmf, idev, GFP_ATOMIC);
+	ireq = isci_request_alloc_tmf(ihost, isci_tmf, GFP_ATOMIC);
 	if (!ireq)
 		return NULL;
 	/* let the core do it's construct. */
-	status = scic_task_request_construct(&ihost->sci, sci_dev,
+	status = scic_task_request_construct(&ihost->sci, &idev->sci,
 					     SCI_CONTROLLER_INVALID_IO_TAG,
 					     &ireq->sci);
@@ -309,25 +308,13 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 	return ireq;
 }
-/**
- * isci_task_execute_tmf() - This function builds and sends a task request,
- *    then waits for the completion.
- * @isci_host: This parameter specifies the ISCI host object
- * @tmf: This parameter is the pointer to the task management structure for
- *    this request.
- * @timeout_ms: This parameter specifies the timeout period for the task
- *    management request.
- *
- * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
- * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
- */
-int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
-			  unsigned long timeout_ms)
+int isci_task_execute_tmf(struct isci_host *ihost,
+			  struct isci_remote_device *isci_device,
+			  struct isci_tmf *tmf, unsigned long timeout_ms)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 	enum sci_task_status status = SCI_TASK_FAILURE;
 	struct scic_sds_remote_device *sci_device;
-	struct isci_remote_device *isci_device = tmf->device;
 	struct isci_request *ireq;
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
@@ -352,7 +339,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
 	/* Assign the pointer to the TMF's completion kernel wait structure. */
 	tmf->complete = &completion;
-	ireq = isci_task_request_build(ihost, tmf);
+	ireq = isci_task_request_build(ihost, isci_device, tmf);
 	if (!ireq) {
 		dev_warn(&ihost->pdev->dev,
 			"%s: isci_task_request_build failed\n",
@@ -399,10 +386,9 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
 		if (tmf->cb_state_func != NULL)
 			tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
-		status = scic_controller_terminate_request(
-			&ireq->isci_host->sci,
-			&ireq->isci_device->sci,
-			&ireq->sci);
+		status = scic_controller_terminate_request(&ihost->sci,
+							   &isci_device->sci,
+							   &ireq->sci);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 	}
@@ -437,65 +423,32 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
 void isci_task_build_tmf(
 	struct isci_tmf *tmf,
-	struct isci_remote_device *isci_device,
 	enum isci_tmf_function_codes code,
 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 			    struct isci_tmf *,
 			    void *),
 	void *cb_data)
 {
-	dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
-		"%s: isci_device = %p\n", __func__, isci_device);
 	memset(tmf, 0, sizeof(*tmf));
-	tmf->device = isci_device;
 	tmf->tmf_code = code;
 	tmf->cb_state_func = tmf_sent_cb;
 	tmf->cb_data = cb_data;
 }
 static void isci_task_build_abort_task_tmf(
 	struct isci_tmf *tmf,
-	struct isci_remote_device *isci_device,
 	enum isci_tmf_function_codes code,
 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 			    struct isci_tmf *,
 			    void *),
 	struct isci_request *old_request)
 {
-	isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
+	isci_task_build_tmf(tmf, code, tmf_sent_cb,
 			    (void *)old_request);
 	tmf->io_tag = old_request->io_tag;
 }
-static struct isci_request *isci_task_get_request_from_task(
-	struct sas_task *task,
-	struct isci_remote_device **isci_device)
-{
-	struct isci_request *request = NULL;
-	unsigned long flags;
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	request = task->lldd_task;
-	/* If task is already done, the request isn't valid */
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
-	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
-	    (request != NULL)) {
-		if (isci_device != NULL)
-			*isci_device = request->isci_device;
-	}
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-	return request;
-}
 /**
  * isci_task_validate_request_to_abort() - This function checks the given I/O
  *    against the "started" state. If the request is still "started", it's
@@ -858,11 +811,10 @@ static int isci_task_send_lu_reset_sas(
 	 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
 	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
 	 */
-	isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
-			    NULL);
+	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
-	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
 	if (ret == TMF_RESP_FUNC_COMPLETE)
 		dev_dbg(&isci_host->pdev->dev,
@@ -888,33 +840,33 @@ static int isci_task_send_lu_reset_sas(
 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 {
 	struct isci_host *isci_host = dev_to_ihost(domain_device);
-	struct isci_remote_device *isci_device = NULL;
+	struct isci_remote_device *isci_device;
+	unsigned long flags;
 	int ret;
-	bool device_stopping = false;
-	isci_device = domain_device->lldd_dev;
+	spin_lock_irqsave(&isci_host->scic_lock, flags);
+	isci_device = isci_lookup_device(domain_device);
+	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
 		__func__, domain_device, isci_host, isci_device);
-	if (isci_device != NULL) {
-		device_stopping = (isci_device->status == isci_stopping)
-				  || (isci_device->status == isci_stopped);
+	if (isci_device)
 		set_bit(IDEV_EH, &isci_device->flags);
-	}
 	/* If there is a device reset pending on any request in the
 	 * device's list, fail this LUN reset request in order to
 	 * escalate to the device reset.
 	 */
-	if (!isci_device || device_stopping ||
+	if (!isci_device ||
 	    isci_device_is_reset_pending(isci_host, isci_device)) {
 		dev_warn(&isci_host->pdev->dev,
 			 "%s: No dev (%p), or "
 			 "RESET PENDING: domain_device=%p\n",
 			 __func__, isci_device, domain_device);
-		return TMF_RESP_FUNC_FAILED;
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
 	}
 	/* Send the task management part of the reset. */
@@ -929,6 +881,8 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 		isci_terminate_pending_requests(isci_host,
 						isci_device);
+ out:
+	isci_put_device(isci_device);
 	return ret;
 }
@@ -1023,60 +977,54 @@ int isci_task_abort_task(struct sas_task *task)
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
 	bool any_dev_reset = false;
-	bool device_stopping;
 	/* Get the isci_request reference from the task.  Note that
 	 * this check does not depend on the pending request list
 	 * in the device, because tasks driving resets may land here
 	 * after completion in the core.
 	 */
-	old_request = isci_task_get_request_from_task(task, &isci_device);
+	spin_lock_irqsave(&isci_host->scic_lock, flags);
+	spin_lock(&task->task_state_lock);
+	old_request = task->lldd_task;
+	/* If task is already done, the request isn't valid */
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+	    old_request)
+		isci_device = isci_lookup_device(task->dev);
+	spin_unlock(&task->task_state_lock);
+	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: task = %p\n", __func__, task);
-	/* Check if the device has been / is currently being removed.
-	 * If so, no task management will be done, and the I/O will
-	 * be terminated.
-	 */
-	device_stopping = (isci_device->status == isci_stopping)
-			  || (isci_device->status == isci_stopped);
-	/* XXX need to fix device lookup lifetime (needs to be done
-	 * under scic_lock, among other things...), but for now assume
-	 * the device is available like the above code
-	 */
+	if (!isci_device || !old_request)
+		goto out;
 	set_bit(IDEV_EH, &isci_device->flags);
 	/* This version of the driver will fail abort requests for
 	 * SATA/STP.  Failing the abort request this way will cause the
 	 * SCSI error handler thread to escalate to LUN reset
 	 */
-	if (sas_protocol_ata(task->task_proto) && !device_stopping) {
+	if (sas_protocol_ata(task->task_proto)) {
 		dev_warn(&isci_host->pdev->dev,
 			 " task %p is for a STP/SATA device;"
 			 " returning TMF_RESP_FUNC_FAILED\n"
 			 " to cause a LUN reset...\n", task);
-		return TMF_RESP_FUNC_FAILED;
+		goto out;
 	}
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: old_request == %p\n", __func__, old_request);
-	if (!device_stopping)
-		any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
+	any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	/* Don't do resets to stopping devices. */
-	if (device_stopping) {
-		task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-		any_dev_reset = false;
-	} else	/* See if there is a pending device reset for this device. */
-		any_dev_reset = any_dev_reset
-			|| (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+	any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
 	/* If the extraction of the request reference from the task
 	 * failed, then the request has been completed (or if there is a
@@ -1130,8 +1078,7 @@ int isci_task_abort_task(struct sas_task *task)
 				"%s: abort task not needed for %p\n",
 				__func__, task);
 		}
-		return ret;
+		goto out;
 	}
 	else
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1158,11 +1105,10 @@ int isci_task_abort_task(struct sas_task *task)
 			"%s: device = %p; old_request %p already being aborted\n",
 			__func__,
 			isci_device, old_request);
-		return TMF_RESP_FUNC_COMPLETE;
+		ret = TMF_RESP_FUNC_COMPLETE;
+		goto out;
 	}
 	if ((task->task_proto == SAS_PROTOCOL_SMP)
-	    || device_stopping
 	    || old_request->complete_in_target
 	    ) {
@@ -1170,10 +1116,9 @@ int isci_task_abort_task(struct sas_task *task)
 		dev_dbg(&isci_host->pdev->dev,
 			"%s: SMP request (%d)"
-			" or device is stopping (%d)"
 			" or complete_in_target (%d), thus no TMF\n",
 			__func__, (task->task_proto == SAS_PROTOCOL_SMP),
-			device_stopping, old_request->complete_in_target);
+			old_request->complete_in_target);
 		/* Set the state on the task. */
 		isci_task_all_done(task);
@@ -1185,15 +1130,14 @@ int isci_task_abort_task(struct sas_task *task)
 		 */
 	} else {
 		/* Fill in the tmf stucture */
-		isci_task_build_abort_task_tmf(&tmf, isci_device,
-					       isci_tmf_ssp_task_abort,
+		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
 					       isci_abort_task_process_cb,
 					       old_request);
 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
-		ret = isci_task_execute_tmf(isci_host, &tmf,
+		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
 					    ISCI_ABORT_TASK_TIMEOUT_MS);
 		if (ret != TMF_RESP_FUNC_COMPLETE)
@@ -1212,6 +1156,8 @@ int isci_task_abort_task(struct sas_task *task)
 	/* Make sure we do not leave a reference to aborted_io_completion */
 	old_request->io_request_completion = NULL;
+ out:
+	isci_put_device(isci_device);
 	return ret;
 }
@@ -1305,7 +1251,6 @@ isci_task_request_complete(struct isci_host *ihost,
 			   struct isci_request *ireq,
 			   enum sci_task_status completion_status)
 {
-	struct isci_remote_device *idev = ireq->isci_device;
 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 	struct completion *tmf_complete;
 	struct scic_sds_request *sci_req = &ireq->sci;
@@ -1332,7 +1277,7 @@ isci_task_request_complete(struct isci_host *ihost,
 	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
 	tmf_complete = tmf->complete;
-	scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
+	scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci);
 	/* set the 'terminated' flag handle to make sure it cannot be terminated
 	 * or completed again.
 	 */
@@ -1583,11 +1528,10 @@ static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy
 	dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
 }
-static int isci_reset_device(struct domain_device *dev, int hard_reset)
+static int isci_reset_device(struct isci_host *ihost,
+			     struct isci_remote_device *idev, int hard_reset)
 {
-	struct isci_remote_device *idev = dev->lldd_dev;
-	struct sas_phy *phy = sas_find_local_phy(dev);
-	struct isci_host *ihost = dev_to_ihost(dev);
+	struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
 	struct isci_port *iport = idev->isci_port;
 	enum sci_status status;
 	unsigned long flags;
@@ -1595,14 +1539,6 @@ static int isci_reset_device(struct domain_device *dev, int hard_reset)
 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
-	if (!idev) {
-		dev_warn(&ihost->pdev->dev,
-			 "%s: idev is GONE!\n",
-			 __func__);
-		return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
-	}
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 	status = scic_remote_device_reset(&idev->sci);
 	if (status != SCI_SUCCESS) {
@@ -1662,35 +1598,50 @@ static int isci_reset_device(struct domain_device *dev, int hard_reset)
 int isci_task_I_T_nexus_reset(struct domain_device *dev)
 {
 	struct isci_host *ihost = dev_to_ihost(dev);
-	int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1;
 	struct isci_remote_device *idev;
+	int ret, hard_reset = 1;
 	unsigned long flags;
-	/* XXX mvsas is not protecting against ->lldd_dev_gone(), are we
-	 * being too paranoid, or is mvsas busted?!
-	 */
 	spin_lock_irqsave(&ihost->scic_lock, flags);
-	idev = dev->lldd_dev;
-	if (!idev || !test_bit(IDEV_EH, &idev->flags))
-		ret = TMF_RESP_FUNC_COMPLETE;
+	idev = isci_lookup_device(dev);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-	if (ret == TMF_RESP_FUNC_COMPLETE)
-		return ret;
+	if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+		ret = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
 		hard_reset = 0;
-	return isci_reset_device(dev, hard_reset);
+	ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+	isci_put_device(idev);
+	return ret;
 }
 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct domain_device *dev = sdev_to_domain_dev(cmd->device);
-	int hard_reset = 1;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	int ret, hard_reset = 1;
+	unsigned long flags;
 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
 		hard_reset = 0;
-	return isci_reset_device(dev, hard_reset);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+	if (!idev) {
+		ret = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
+	ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+	isci_put_device(idev);
+	return ret;
 }
@@ -213,18 +213,15 @@ int isci_bus_reset_handler(struct scsi_cmnd *cmd);
 void isci_task_build_tmf(
 	struct isci_tmf *tmf,
-	struct isci_remote_device *isci_device,
 	enum isci_tmf_function_codes code,
 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 			    struct isci_tmf *,
 			    void *),
 	void *cb_data);
-int isci_task_execute_tmf(
-	struct isci_host *isci_host,
-	struct isci_tmf *tmf,
-	unsigned long timeout_ms);
+int isci_task_execute_tmf(struct isci_host *isci_host,
+			  struct isci_remote_device *idev,
+			  struct isci_tmf *tmf, unsigned long timeout_ms);
 /**
  * enum isci_completion_selection - This enum defines the possible actions to
...