Commit 14aaa9f0 authored by Jeff Skirvin, committed by Dan Williams

isci: Redesign device suspension, abort, cleanup.

This commit changes the means by which outstanding I/Os are handled
for cleanup.
This commit will likely be broken into smaller pieces in a later revision.
Among the changes:

- All completion structures have been removed from the TMF and abort paths.
- A single completed-I/O list is now used, with each request's "completed in
host" flag bit selecting the error or normal callback path (see the sketch
below, after the commit metadata).
Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent d80ecd57
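
Before the diff itself, here is a minimal stand-alone sketch of the completion
policy described above: a single completed-I/O list is drained, and per-request
flag bits choose between the normal task_done() callback and the
sas_task_abort() error path. This is only an illustration, not the driver code;
the struct names, stubs, and the simplified singly linked list are invented for
the example, while the flag semantics (IREQ_COMPLETE_IN_TARGET,
IREQ_ABORT_PATH_ACTIVE, SAS_TASK_STATE_ABORTED) mirror the
isci_host_completion_routine() hunks that follow.

/* completion_sketch.c -- illustrative model only; builds as plain C99.
 * None of these types are the isci driver's real definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_task {                    /* stand-in for struct sas_task */
        bool aborted;                   /* models SAS_TASK_STATE_ABORTED */
};

struct sketch_request {                 /* stand-in for struct isci_request */
        bool complete_in_target;        /* models IREQ_COMPLETE_IN_TARGET */
        bool abort_path_active;         /* models IREQ_ABORT_PATH_ACTIVE */
        struct sketch_task *task;
        struct sketch_request *next;    /* the single completed-I/O list */
};

static void task_done(struct sketch_task *t)
{
        printf("task_done(%p): normal completion back to libsas\n", (void *)t);
}

static void task_abort(struct sketch_task *t)
{
        printf("sas_task_abort(%p): push task into libsas error handling\n", (void *)t);
}

/* Drain the one completed list; flag bits pick the callback path per request. */
static void drain_completed_list(struct sketch_request *head)
{
        for (struct sketch_request *req = head; req; req = req->next) {
                struct sketch_task *task = req->task;

                if (!task)
                        continue;       /* e.g. an internal TMF request */
                if (req->abort_path_active || task->aborted)
                        continue;       /* the abort path owns the callback */

                if (req->complete_in_target)
                        task_done(task);        /* completed in the target: normal path */
                else
                        task_abort(task);       /* otherwise treat as an error */
        }
}

int main(void)
{
        struct sketch_task t1 = { .aborted = false };
        struct sketch_task t2 = { .aborted = false };
        struct sketch_request r2 = { .complete_in_target = false, .task = &t2 };
        struct sketch_request r1 = { .complete_in_target = true,  .task = &t1, .next = &r2 };

        drain_completed_list(&r1);      /* expect: task_done(t1), then sas_task_abort(t2) */
        return 0;
}

In the real driver, the list is spliced from the host under scic_lock and then
walked in isci_host_completion_routine(), as the first hunks below show.
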
@@ -1089,33 +1089,25 @@ void isci_host_completion_routine(unsigned long data)
 {
         struct isci_host *ihost = (struct isci_host *)data;
         struct list_head completed_request_list;
-        struct list_head errored_request_list;
         struct list_head *current_position;
         struct list_head *next_position;
         struct isci_request *request;
-        struct isci_request *next_request;
         struct sas_task *task;
         u16 active;
         INIT_LIST_HEAD(&completed_request_list);
-        INIT_LIST_HEAD(&errored_request_list);
         spin_lock_irq(&ihost->scic_lock);
         sci_controller_completion_handler(ihost);
         /* Take the lists of completed I/Os from the host. */
         list_splice_init(&ihost->requests_to_complete,
                          &completed_request_list);
-        /* Take the list of errored I/Os from the host. */
-        list_splice_init(&ihost->requests_to_errorback,
-                         &errored_request_list);
         spin_unlock_irq(&ihost->scic_lock);
-        /* Process any completions in the lists. */
+        /* Process any completions in the list. */
         list_for_each_safe(current_position, next_position,
                            &completed_request_list) {
@@ -1123,23 +1115,30 @@ void isci_host_completion_routine(unsigned long data)
                                      completed_node);
                 task = isci_request_access_task(request);
-                /* Normal notification (task_done) */
-                dev_dbg(&ihost->pdev->dev,
-                        "%s: Normal - request/task = %p/%p\n",
-                        __func__,
-                        request,
-                        task);
                 /* Return the task to libsas */
                 if (task != NULL) {
                         task->lldd_task = NULL;
-                        if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                                /* If the task is already in the abort path,
-                                 * the task_done callback cannot be called.
-                                 */
-                                task->task_done(task);
+                        if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &request->flags) &&
+                            !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+                                if (test_bit(IREQ_COMPLETE_IN_TARGET,
+                                             &request->flags)) {
+                                        /* Normal notification (task_done) */
+                                        dev_dbg(&ihost->pdev->dev, "%s: Normal"
+                                                " - request/task = %p/%p\n",
+                                                __func__, request, task);
+                                        task->task_done(task);
+                                } else {
+                                        dev_warn(&ihost->pdev->dev,
+                                                 "%s: Error - request/task"
+                                                 " = %p/%p\n",
+                                                 __func__, request, task);
+                                        sas_task_abort(task);
+                                }
                         }
                 }
@@ -1147,44 +1146,6 @@ void isci_host_completion_routine(unsigned long data)
                 isci_free_tag(ihost, request->io_tag);
                 spin_unlock_irq(&ihost->scic_lock);
         }
-        list_for_each_entry_safe(request, next_request, &errored_request_list,
-                                 completed_node) {
-                task = isci_request_access_task(request);
-                /* Use sas_task_abort */
-                dev_warn(&ihost->pdev->dev,
-                         "%s: Error - request/task = %p/%p\n",
-                         __func__,
-                         request,
-                         task);
-                if (task != NULL) {
-                        /* Put the task into the abort path if it's not there
-                         * already.
-                         */
-                        if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
-                                sas_task_abort(task);
-                } else {
-                        /* This is a case where the request has completed with a
-                         * status such that it needed further target servicing,
-                         * but the sas_task reference has already been removed
-                         * from the request. Since it was errored, it was not
-                         * being aborted, so there is nothing to do except free
-                         * it.
-                         */
-                        spin_lock_irq(&ihost->scic_lock);
-                        /* Remove the request from the remote device's list
-                         * of pending requests.
-                         */
-                        list_del_init(&request->dev_node);
-                        isci_free_tag(ihost, request->io_tag);
-                        spin_unlock_irq(&ihost->scic_lock);
-                }
-        }
         /* the coalesence timeout doubles at each encoding step, so
          * update it based on the ilog2 value of the outstanding requests
@@ -2345,7 +2306,6 @@ static int sci_controller_dma_alloc(struct isci_host *ihost)
                 ireq->tc = &ihost->task_context_table[i];
                 ireq->owning_controller = ihost;
-                spin_lock_init(&ireq->state_lock);
                 ireq->request_daddr = dma;
                 ireq->isci_host = ihost;
                 ihost->reqs[i] = ireq;
@@ -2697,6 +2657,10 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
                 return SCI_FAILURE_INVALID_STATE;
         }
         status = sci_io_request_terminate(ireq);
+        dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+                __func__, status, ireq, ireq->flags);
         if ((status == SCI_SUCCESS) &&
             !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
             !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
@@ -2739,6 +2703,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
                 index = ISCI_TAG_TCI(ireq->io_tag);
                 clear_bit(IREQ_ACTIVE, &ireq->flags);
+                if (test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+                        wake_up_all(&ihost->eventq);
                 return SCI_SUCCESS;
         default:
                 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
...
@@ -205,7 +205,6 @@ struct isci_host {
         wait_queue_head_t eventq;
         struct tasklet_struct completion_tasklet;
         struct list_head requests_to_complete;
-        struct list_head requests_to_errorback;
         spinlock_t scic_lock;
         struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
         struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
...
@@ -556,7 +556,6 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
         }
         INIT_LIST_HEAD(&ihost->requests_to_complete);
-        INIT_LIST_HEAD(&ihost->requests_to_errorback);
         for (i = 0; i < SCI_MAX_PORTS; i++) {
                 struct isci_port *iport = &ihost->ports[i];
...
This diff is collapsed.
@@ -85,7 +85,6 @@ struct isci_remote_device {
         #define IDEV_GONE 3
         #define IDEV_IO_READY 4
         #define IDEV_IO_NCQERROR 5
-        #define IDEV_TXRX_SUSPENDED 6
         unsigned long flags;
         struct kref kref;
         struct isci_port *isci_port;
@@ -107,10 +106,8 @@ struct isci_remote_device {
 /* device reference routines must be called under sci_lock */
 static inline struct isci_remote_device *isci_get_device(
-        struct domain_device *dev)
+        struct isci_remote_device *idev)
 {
-        struct isci_remote_device *idev = dev->lldd_dev;
         if (idev)
                 kref_get(&idev->kref);
         return idev;
@@ -378,4 +375,14 @@ enum sci_status isci_remote_device_reset(
 enum sci_status isci_remote_device_reset_complete(
         struct isci_host *ihost,
         struct isci_remote_device *idev);
+enum sci_status isci_remote_device_suspend_terminate(
+        struct isci_host *ihost,
+        struct isci_remote_device *idev,
+        struct isci_request *ireq);
+enum sci_status isci_remote_device_terminate_requests(
+        struct isci_host *ihost,
+        struct isci_remote_device *idev,
+        struct isci_request *ireq);
 #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
@@ -317,8 +317,6 @@ static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_
         struct isci_remote_device *idev = rnc_to_dev(rnc);
         struct isci_host *ihost = idev->owning_port->owning_controller;
-        set_bit(IDEV_TXRX_SUSPENDED, &idev->flags);
         /* Terminate outstanding requests pending abort. */
         sci_remote_device_abort_requests_pending_abort(idev);
@@ -326,16 +324,6 @@ static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_
         sci_remote_node_context_continue_state_transitions(rnc);
 }
-static void sci_remote_node_context_tx_rx_suspended_state_exit(
-        struct sci_base_state_machine *sm)
-{
-        struct sci_remote_node_context *rnc
-                = container_of(sm, typeof(*rnc), sm);
-        struct isci_remote_device *idev = rnc_to_dev(rnc);
-        clear_bit(IDEV_TXRX_SUSPENDED, &idev->flags);
-}
 static void sci_remote_node_context_await_suspend_state_exit(
         struct sci_base_state_machine *sm)
 {
@@ -366,8 +354,6 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = {
         },
         [SCI_RNC_TX_RX_SUSPENDED] = {
                 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
-                .exit_state
-                        = sci_remote_node_context_tx_rx_suspended_state_exit,
         },
         [SCI_RNC_AWAIT_SUSPENSION] = {
                 .exit_state = sci_remote_node_context_await_suspend_state_exit,
@@ -671,8 +657,11 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context
         }
 }
-enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
-                                                   struct isci_request *ireq)
+enum sci_status sci_remote_node_context_start_task(
+        struct sci_remote_node_context *sci_rnc,
+        struct isci_request *ireq,
+        scics_sds_remote_node_context_callback cb_fn,
+        void *cb_p)
 {
         enum scis_sds_remote_node_context_states state;
@@ -684,7 +673,7 @@ enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_contex
                 return SCI_SUCCESS;
         case SCI_RNC_TX_SUSPENDED:
         case SCI_RNC_TX_RX_SUSPENDED:
-                sci_remote_node_context_resume(sci_rnc, NULL, NULL);
+                sci_remote_node_context_resume(sci_rnc, cb_fn, cb_p);
                 return SCI_SUCCESS;
         default:
                 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
...
@@ -211,7 +211,9 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s
                                                scics_sds_remote_node_context_callback cb_fn,
                                                void *cb_p);
 enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
-                                                   struct isci_request *ireq);
+                                                   struct isci_request *ireq,
+                                                   scics_sds_remote_node_context_callback cb_fn,
+                                                   void *cb_p);
 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
                                                  struct isci_request *ireq);
 int sci_remote_node_context_is_safe_to_abort(
...
This diff is collapsed.
@@ -60,23 +60,6 @@
 #include "host.h"
 #include "scu_task_context.h"
-/**
- * struct isci_request_status - This enum defines the possible states of an I/O
- * request.
- *
- *
- */
-enum isci_request_status {
-        unallocated = 0x00,
-        allocated = 0x01,
-        started = 0x02,
-        completed = 0x03,
-        aborting = 0x04,
-        aborted = 0x05,
-        terminating = 0x06,
-        dead = 0x07
-};
 /**
  * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
  * @pio_len - number of bytes requested at PIO setup
@@ -97,13 +80,13 @@ struct isci_stp_request {
 };
 struct isci_request {
-        enum isci_request_status status;
         #define IREQ_COMPLETE_IN_TARGET 0
         #define IREQ_TERMINATED 1
         #define IREQ_TMF 2
         #define IREQ_ACTIVE 3
         #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
         #define IREQ_TC_ABORT_POSTED 5
+        #define IREQ_ABORT_PATH_ACTIVE 6
         unsigned long flags;
         /* XXX kill ttype and ttype_ptr, allocate full sas_task */
         union ttype_ptr_union {
@@ -115,7 +98,6 @@ struct isci_request {
         struct list_head completed_node;
         /* For use in the reqs_in_process list: */
         struct list_head dev_node;
-        spinlock_t state_lock;
         dma_addr_t request_daddr;
         dma_addr_t zero_scatter_daddr;
         unsigned int num_sg_entries;
@@ -304,92 +286,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
         return ireq->request_daddr + (requested_addr - base_addr);
 }
-/**
- * isci_request_change_state() - This function sets the status of the request
- * object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- */
-static inline enum isci_request_status
-isci_request_change_state(struct isci_request *isci_request,
-                          enum isci_request_status status)
-{
-        enum isci_request_status old_state;
-        unsigned long flags;
-        dev_dbg(&isci_request->isci_host->pdev->dev,
-                "%s: isci_request = %p, state = 0x%x\n",
-                __func__,
-                isci_request,
-                status);
-        BUG_ON(isci_request == NULL);
-        spin_lock_irqsave(&isci_request->state_lock, flags);
-        old_state = isci_request->status;
-        isci_request->status = status;
-        spin_unlock_irqrestore(&isci_request->state_lock, flags);
-        return old_state;
-}
-/**
- * isci_request_change_started_to_newstate() - This function sets the status of
- * the request object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_newstate(struct isci_request *isci_request,
-                                        struct completion *completion_ptr,
-                                        enum isci_request_status newstate)
-{
-        enum isci_request_status old_state;
-        unsigned long flags;
-        spin_lock_irqsave(&isci_request->state_lock, flags);
-        old_state = isci_request->status;
-        if (old_state == started || old_state == aborting) {
-                BUG_ON(isci_request->io_request_completion != NULL);
-                isci_request->io_request_completion = completion_ptr;
-                isci_request->status = newstate;
-        }
-        spin_unlock_irqrestore(&isci_request->state_lock, flags);
-        dev_dbg(&isci_request->isci_host->pdev->dev,
-                "%s: isci_request = %p, old_state = 0x%x\n",
-                __func__,
-                isci_request,
-                old_state);
-        return old_state;
-}
-/**
- * isci_request_change_started_to_aborted() - This function sets the status of
- * the request object.
- * @request: This parameter points to the isci_request object
- * @completion_ptr: This parameter is saved as the kernel completion structure
- *      signalled when the old request completes.
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_aborted(struct isci_request *isci_request,
-                                       struct completion *completion_ptr)
-{
-        return isci_request_change_started_to_newstate(isci_request,
-                                                       completion_ptr,
-                                                       aborted);
-}
 #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
 #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
@@ -399,8 +295,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
                                                u16 tag);
 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
                          struct sas_task *task, u16 tag);
-void isci_terminate_pending_requests(struct isci_host *ihost,
-                                     struct isci_remote_device *idev);
 enum sci_status
 sci_task_request_construct(struct isci_host *ihost,
                            struct isci_remote_device *idev,
...
This diff is collapsed.
@@ -62,19 +62,6 @@
 struct isci_request;
-/**
- * enum isci_tmf_cb_state - This enum defines the possible states in which the
- * TMF callback function is invoked during the TMF execution process.
- *
- *
- */
-enum isci_tmf_cb_state {
-        isci_tmf_init_state = 0,
-        isci_tmf_started,
-        isci_tmf_timed_out
-};
 /**
  * enum isci_tmf_function_codes - This enum defines the possible preparations
  * of task management requests.
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes {
         isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
         isci_tmf_ssp_lun_reset = TMF_LU_RESET,
 };
+
 /**
  * struct isci_tmf - This class represents the task management object which
  * acts as an interface to libsas for processing task management requests
@@ -106,15 +94,6 @@ struct isci_tmf {
         u16 io_tag;
         enum isci_tmf_function_codes tmf_code;
         int status;
-        /* The optional callback function allows the user process to
-         * track the TMF transmit / timeout conditions.
-         */
-        void (*cb_state_func)(
-                enum isci_tmf_cb_state,
-                struct isci_tmf *, void *);
-        void *cb_data;
 };
 static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
@@ -208,113 +187,4 @@ int isci_queuecommand(
         struct scsi_cmnd *scsi_cmd,
         void (*donefunc)(struct scsi_cmnd *));
-/**
- * enum isci_completion_selection - This enum defines the possible actions to
- * take with respect to a given request's notification back to libsas.
- *
- *
- */
-enum isci_completion_selection {
-        isci_perform_normal_io_completion,      /* Normal notify (task_done) */
-        isci_perform_aborted_io_completion,     /* No notification. */
-        isci_perform_error_io_completion        /* Use sas_task_abort */
-};
-/**
- * isci_task_set_completion_status() - This function sets the completion status
- * for the request.
- * @task: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
- * @return The new notification mode for the request.
- */
-static inline enum isci_completion_selection
-isci_task_set_completion_status(
-        struct sas_task *task,
-        enum service_response response,
-        enum exec_status status,
-        enum isci_completion_selection task_notification_selection)
-{
-        unsigned long flags;
-        spin_lock_irqsave(&task->task_state_lock, flags);
-        /* If a device reset is being indicated, make sure the I/O
-         * is in the error path.
-         */
-        if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
-                /* Fail the I/O to make sure it goes into the error path. */
-                response = SAS_TASK_UNDELIVERED;
-                status = SAM_STAT_TASK_ABORTED;
-                task_notification_selection = isci_perform_error_io_completion;
-        }
-        task->task_status.resp = response;
-        task->task_status.stat = status;
-        switch (task->task_proto) {
-        case SAS_PROTOCOL_SATA:
-        case SAS_PROTOCOL_STP:
-        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-                if (task_notification_selection
-                    == isci_perform_error_io_completion) {
-                        /* SATA/STP I/O has it's own means of scheduling device
-                         * error handling on the normal path.
-                         */
-                        task_notification_selection
-                                = isci_perform_normal_io_completion;
-                }
-                break;
-        default:
-                break;
-        }
-        switch (task_notification_selection) {
-        case isci_perform_error_io_completion:
-                if (task->task_proto == SAS_PROTOCOL_SMP) {
-                        /* There is no error escalation in the SMP case.
-                         * Convert to a normal completion to avoid the
-                         * timeout in the discovery path and to let the
-                         * next action take place quickly.
-                         */
-                        task_notification_selection
-                                = isci_perform_normal_io_completion;
-                        /* Fall through to the normal case... */
-                } else {
-                        /* Use sas_task_abort */
-                        /* Leave SAS_TASK_STATE_DONE clear
-                         * Leave SAS_TASK_AT_INITIATOR set.
-                         */
-                        break;
-                }
-        case isci_perform_aborted_io_completion:
-                /* This path can occur with task-managed requests as well as
-                 * requests terminated because of LUN or device resets.
-                 */
-                /* Fall through to the normal case... */
-        case isci_perform_normal_io_completion:
-                /* Normal notification (task_done) */
-                task->task_state_flags |= SAS_TASK_STATE_DONE;
-                task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
-                                            SAS_TASK_STATE_PENDING);
-                break;
-        default:
-                WARN_ONCE(1, "unknown task_notification_selection: %d\n",
-                          task_notification_selection);
-                break;
-        }
-        spin_unlock_irqrestore(&task->task_state_lock, flags);
-        return task_notification_selection;
-}
 #endif /* !defined(_SCI_TASK_H_) */