Commit dd9e11d6 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "Six late arriving patches for the merge window. Five are minor
  assorted fixes and updates.

  The IPR driver change removes SATA support, which will now allow a
  major cleanup in the ATA subsystem because it was the only driver
  still using the old attachment mechanism. The driver is only used on
  power systems and SATA was used to support a DVD device, which has
  long been moved to a different hba. IBM chose this route instead of
  porting ipr to the newer SATA interfaces"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: qedi: Fix use after free bug in qedi_remove()
  scsi: ufs: core: mcq: Fix &hwq->cq_lock deadlock issue
  scsi: ipr: Remove several unused variables
  scsi: pm80xx: Log device registration
  scsi: ipr: Remove SATA support
  scsi: scsi_debug: Abort commands from scsi_debug_device_reset()
parents a3b111b0 c5749639
...@@ -971,8 +971,7 @@ config SCSI_SYM53C8XX_MMIO ...@@ -971,8 +971,7 @@ config SCSI_SYM53C8XX_MMIO
config SCSI_IPR config SCSI_IPR
tristate "IBM Power Linux RAID adapter support" tristate "IBM Power Linux RAID adapter support"
depends on PCI && SCSI && ATA depends on PCI && SCSI
select SATA_HOST
select FW_LOADER select FW_LOADER
select IRQ_POLL select IRQ_POLL
select SGL_ALLOC select SGL_ALLOC
......
...@@ -58,7 +58,6 @@ ...@@ -58,7 +58,6 @@
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h> #include <linux/hdreg.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/stringify.h> #include <linux/stringify.h>
...@@ -595,10 +594,6 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, ...@@ -595,10 +594,6 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->time = jiffies; trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type; trace_entry->type = type;
if (ipr_cmd->ioa_cfg->sis64)
trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
else
trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data; trace_entry->u.add_data = add_data;
...@@ -636,7 +631,6 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) ...@@ -636,7 +631,6 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{ {
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr; dma_addr_t dma_addr = ipr_cmd->dma_addr;
int hrrq_id; int hrrq_id;
...@@ -651,18 +645,15 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) ...@@ -651,18 +645,15 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
if (ipr_cmd->ioa_cfg->sis64) { if (ipr_cmd->ioa_cfg->sis64) {
ioarcb->u.sis64_addr_data.data_ioadl_addr = ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
ioasa64->u.gata.status = 0;
} else { } else {
ioarcb->write_ioadl_addr = ioarcb->write_ioadl_addr =
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
ioasa->u.gata.status = 0;
} }
ioasa->hdr.ioasc = 0; ioasa->hdr.ioasc = 0;
ioasa->hdr.residual_data_len = 0; ioasa->hdr.residual_data_len = 0;
ipr_cmd->scsi_cmd = NULL; ipr_cmd->scsi_cmd = NULL;
ipr_cmd->qc = NULL;
ipr_cmd->sense_buffer[0] = 0; ipr_cmd->sense_buffer[0] = 0;
ipr_cmd->dma_use_sg = 0; ipr_cmd->dma_use_sg = 0;
} }
...@@ -806,48 +797,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) ...@@ -806,48 +797,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
return 0; return 0;
} }
/**
* __ipr_sata_eh_done - done function for aborted SATA commands
* @ipr_cmd: ipr command struct
*
* This function is invoked for ops generated to SATA
* devices which are being aborted.
*
* Return value:
* none
**/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
ata_qc_complete(qc);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
* ipr_sata_eh_done - done function for aborted SATA commands
* @ipr_cmd: ipr command struct
*
* This function is invoked for ops generated to SATA
* devices which are being aborted.
*
* Return value:
* none
**/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
unsigned long hrrq_flags;
spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
__ipr_sata_eh_done(ipr_cmd);
spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/** /**
* __ipr_scsi_eh_done - mid-layer done function for aborted ops * __ipr_scsi_eh_done - mid-layer done function for aborted ops
...@@ -920,8 +869,6 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) ...@@ -920,8 +869,6 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
if (ipr_cmd->scsi_cmd) if (ipr_cmd->scsi_cmd)
ipr_cmd->done = __ipr_scsi_eh_done; ipr_cmd->done = __ipr_scsi_eh_done;
else if (ipr_cmd->qc)
ipr_cmd->done = __ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET); IPR_IOASC_IOA_WAS_RESET);
...@@ -1142,31 +1089,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, ...@@ -1142,31 +1089,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
} }
} }
/**
* ipr_update_ata_class - Update the ata class in the resource entry
* @res: resource entry struct
* @proto: cfgte device bus protocol value
*
* Return value:
* none
**/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
switch (proto) {
case IPR_PROTO_SATA:
case IPR_PROTO_SAS_STP:
res->ata_class = ATA_DEV_ATA;
break;
case IPR_PROTO_SATA_ATAPI:
case IPR_PROTO_SAS_STP_ATAPI:
res->ata_class = ATA_DEV_ATAPI;
break;
default:
res->ata_class = ATA_DEV_UNKNOWN;
break;
}
}
/** /**
* ipr_init_res_entry - Initialize a resource entry struct. * ipr_init_res_entry - Initialize a resource entry struct.
* @res: resource entry struct * @res: resource entry struct
...@@ -1179,7 +1101,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, ...@@ -1179,7 +1101,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
struct ipr_config_table_entry_wrapper *cfgtew) struct ipr_config_table_entry_wrapper *cfgtew)
{ {
int found = 0; int found = 0;
unsigned int proto;
struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
struct ipr_resource_entry *gscsi_res = NULL; struct ipr_resource_entry *gscsi_res = NULL;
...@@ -1190,10 +1111,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, ...@@ -1190,10 +1111,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->resetting_device = 0; res->resetting_device = 0;
res->reset_occurred = 0; res->reset_occurred = 0;
res->sdev = NULL; res->sdev = NULL;
res->sata_port = NULL;
if (ioa_cfg->sis64) { if (ioa_cfg->sis64) {
proto = cfgtew->u.cfgte64->proto;
res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
res->qmodel = IPR_QUEUEING_MODEL64(res); res->qmodel = IPR_QUEUEING_MODEL64(res);
...@@ -1239,7 +1158,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, ...@@ -1239,7 +1158,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
set_bit(res->target, ioa_cfg->target_ids); set_bit(res->target, ioa_cfg->target_ids);
} }
} else { } else {
proto = cfgtew->u.cfgte->proto;
res->qmodel = IPR_QUEUEING_MODEL(res); res->qmodel = IPR_QUEUEING_MODEL(res);
res->flags = cfgtew->u.cfgte->flags; res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE) if (res->flags & IPR_IS_IOA_RESOURCE)
...@@ -1252,8 +1170,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, ...@@ -1252,8 +1170,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->lun = cfgtew->u.cfgte->res_addr.lun; res->lun = cfgtew->u.cfgte->res_addr.lun;
res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
} }
ipr_update_ata_class(res, proto);
} }
/** /**
...@@ -1339,7 +1255,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res, ...@@ -1339,7 +1255,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
struct ipr_config_table_entry_wrapper *cfgtew) struct ipr_config_table_entry_wrapper *cfgtew)
{ {
char buffer[IPR_MAX_RES_PATH_LENGTH]; char buffer[IPR_MAX_RES_PATH_LENGTH];
unsigned int proto;
int new_path = 0; int new_path = 0;
if (res->ioa_cfg->sis64) { if (res->ioa_cfg->sis64) {
...@@ -1351,7 +1266,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res, ...@@ -1351,7 +1266,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
sizeof(struct ipr_std_inq_data)); sizeof(struct ipr_std_inq_data));
res->qmodel = IPR_QUEUEING_MODEL64(res); res->qmodel = IPR_QUEUEING_MODEL64(res);
proto = cfgtew->u.cfgte64->proto;
res->res_handle = cfgtew->u.cfgte64->res_handle; res->res_handle = cfgtew->u.cfgte64->res_handle;
res->dev_id = cfgtew->u.cfgte64->dev_id; res->dev_id = cfgtew->u.cfgte64->dev_id;
...@@ -1380,11 +1294,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res, ...@@ -1380,11 +1294,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
sizeof(struct ipr_std_inq_data)); sizeof(struct ipr_std_inq_data));
res->qmodel = IPR_QUEUEING_MODEL(res); res->qmodel = IPR_QUEUEING_MODEL(res);
proto = cfgtew->u.cfgte->proto;
res->res_handle = cfgtew->u.cfgte->res_handle; res->res_handle = cfgtew->u.cfgte->res_handle;
} }
ipr_update_ata_class(res, proto);
} }
/** /**
...@@ -4496,17 +4407,6 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; ...@@ -4496,17 +4407,6 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
**/ **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{ {
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
qdepth = IPR_MAX_CMD_PER_ATA_LUN;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_change_queue_depth(sdev, qdepth); scsi_change_queue_depth(sdev, qdepth);
return sdev->queue_depth; return sdev->queue_depth;
} }
...@@ -4799,68 +4699,13 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) ...@@ -4799,68 +4699,13 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
return NULL; return NULL;
} }
static struct ata_port_info sata_port_info;
/**
* ipr_target_alloc - Prepare for commands to a SCSI target
* @starget: scsi target struct
*
* If the device is a SATA device, this function allocates an
* ATA port with libata, else it does nothing.
*
* Return value:
* 0 on success / non-0 on failure
**/
static int ipr_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
struct ipr_sata_port *sata_port;
struct ata_port *ap;
struct ipr_resource_entry *res;
unsigned long lock_flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = ipr_find_starget(starget);
starget->hostdata = NULL;
if (res && ipr_is_gata(res)) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
if (!sata_port)
return -ENOMEM;
ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
if (ap) {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
sata_port->ioa_cfg = ioa_cfg;
sata_port->ap = ap;
sata_port->res = res;
res->sata_port = sata_port;
ap->private_data = sata_port;
starget->hostdata = sata_port;
} else {
kfree(sata_port);
return -ENOMEM;
}
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return 0;
}
/** /**
* ipr_target_destroy - Destroy a SCSI target * ipr_target_destroy - Destroy a SCSI target
* @starget: scsi target struct * @starget: scsi target struct
* *
* If the device was a SATA device, this function frees the libata
* ATA port, else it does nothing.
*
**/ **/
static void ipr_target_destroy(struct scsi_target *starget) static void ipr_target_destroy(struct scsi_target *starget)
{ {
struct ipr_sata_port *sata_port = starget->hostdata;
struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
...@@ -4874,12 +4719,6 @@ static void ipr_target_destroy(struct scsi_target *starget) ...@@ -4874,12 +4719,6 @@ static void ipr_target_destroy(struct scsi_target *starget)
clear_bit(starget->id, ioa_cfg->target_ids); clear_bit(starget->id, ioa_cfg->target_ids);
} }
} }
if (sata_port) {
starget->hostdata = NULL;
ata_sas_port_destroy(sata_port->ap);
kfree(sata_port);
}
} }
/** /**
...@@ -4922,11 +4761,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev) ...@@ -4922,11 +4761,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *) sdev->hostdata; res = (struct ipr_resource_entry *) sdev->hostdata;
if (res) { if (res) {
if (res->sata_port)
res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
sdev->hostdata = NULL; sdev->hostdata = NULL;
res->sdev = NULL; res->sdev = NULL;
res->sata_port = NULL;
} }
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
} }
...@@ -4944,7 +4780,6 @@ static int ipr_slave_configure(struct scsi_device *sdev) ...@@ -4944,7 +4780,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
{ {
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res; struct ipr_resource_entry *res;
struct ata_port *ap = NULL;
unsigned long lock_flags = 0; unsigned long lock_flags = 0;
char buffer[IPR_MAX_RES_PATH_LENGTH]; char buffer[IPR_MAX_RES_PATH_LENGTH];
...@@ -4964,15 +4799,8 @@ static int ipr_slave_configure(struct scsi_device *sdev) ...@@ -4964,15 +4799,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
IPR_VSET_RW_TIMEOUT); IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
} }
if (ipr_is_gata(res) && res->sata_port)
ap = res->sata_port->ap;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
if (ap) {
scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
ata_sas_slave_configure(sdev, ap);
}
if (ioa_cfg->sis64) if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
ipr_format_res_path(ioa_cfg, ipr_format_res_path(ioa_cfg,
...@@ -4983,37 +4811,6 @@ static int ipr_slave_configure(struct scsi_device *sdev) ...@@ -4983,37 +4811,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
return 0; return 0;
} }
/**
* ipr_ata_slave_alloc - Prepare for commands to a SATA device
* @sdev: scsi device struct
*
* This function initializes an ATA port so that future commands
* sent through queuecommand will work.
*
* Return value:
* 0 on success
**/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
struct ipr_sata_port *sata_port = NULL;
int rc = -ENXIO;
ENTER;
if (sdev->sdev_target)
sata_port = sdev->sdev_target->hostdata;
if (sata_port) {
rc = ata_sas_port_init(sata_port->ap);
if (rc == 0)
rc = ata_sas_sync_probe(sata_port->ap);
}
if (rc)
ipr_slave_destroy(sdev);
LEAVE;
return rc;
}
/** /**
* ipr_slave_alloc - Prepare for commands to a device. * ipr_slave_alloc - Prepare for commands to a device.
* @sdev: scsi device struct * @sdev: scsi device struct
...@@ -5047,8 +4844,10 @@ static int ipr_slave_alloc(struct scsi_device *sdev) ...@@ -5047,8 +4844,10 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
res->needs_sync_complete = 1; res->needs_sync_complete = 1;
rc = 0; rc = 0;
if (ipr_is_gata(res)) { if (ipr_is_gata(res)) {
sdev_printk(KERN_ERR, sdev, "SATA devices are no longer "
"supported by this driver. Skipping device.\n");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return ipr_ata_slave_alloc(sdev); return -ENXIO;
} }
} }
...@@ -5091,23 +4890,6 @@ static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd) ...@@ -5091,23 +4890,6 @@ static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
return false; return false;
} }
/**
* ipr_match_res - Match function for specified resource entry
* @ipr_cmd: ipr command struct
* @resource: resource entry to match
*
* Returns:
* 1 if command matches sdev / 0 if command does not match sdev
**/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
struct ipr_resource_entry *res = resource;
if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
return 1;
return 0;
}
/** /**
* ipr_wait_for_ops - Wait for matching commands to complete * ipr_wait_for_ops - Wait for matching commands to complete
* @ioa_cfg: ioa config struct * @ioa_cfg: ioa config struct
...@@ -5220,8 +5002,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) ...@@ -5220,8 +5002,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
* This function issues a device reset to the affected device. * This function issues a device reset to the affected device.
* If the device is a SCSI device, a LUN reset will be sent * If the device is a SCSI device, a LUN reset will be sent
* to the device first. If that does not work, a target reset * to the device first. If that does not work, a target reset
* will be sent. If the device is a SATA device, a PHY reset will * will be sent.
* be sent.
* *
* Return value: * Return value:
* 0 on success / non-zero on failure * 0 on success / non-zero on failure
...@@ -5232,7 +5013,6 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, ...@@ -5232,7 +5013,6 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd; struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb; struct ipr_ioarcb *ioarcb;
struct ipr_cmd_pkt *cmd_pkt; struct ipr_cmd_pkt *cmd_pkt;
struct ipr_ioarcb_ata_regs *regs;
u32 ioasc; u32 ioasc;
ENTER; ENTER;
...@@ -5240,86 +5020,21 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, ...@@ -5240,86 +5020,21 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ioarcb = &ipr_cmd->ioarcb; ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt; cmd_pkt = &ioarcb->cmd_pkt;
if (ipr_cmd->ioa_cfg->sis64) { if (ipr_cmd->ioa_cfg->sis64)
regs = &ipr_cmd->i.ata_ioadl.regs;
ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
} else
regs = &ioarcb->u.add_data.u.regs;
ioarcb->res_handle = res->res_handle; ioarcb->res_handle = res->res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE; cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
if (ipr_is_gata(res)) {
cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
}
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
sizeof(struct ipr_ioasa_gata));
else
memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
sizeof(struct ipr_ioasa_gata));
}
LEAVE; LEAVE;
return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
} }
/**
* ipr_sata_reset - Reset the SATA port
* @link: SATA link to reset
* @classes: class of the attached device
* @deadline: unused
*
* This function issues a SATA phy reset to the affected ATA link.
*
* Return value:
* 0 on success / non-zero on failure
**/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
struct ipr_sata_port *sata_port = link->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
int rc = -ENXIO, ret;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
res = sata_port->res;
if (res) {
rc = ipr_device_reset(ioa_cfg, res);
*classes = res->ata_class;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
if (ret != SUCCESS) {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
} else
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
LEAVE;
return rc;
}
/** /**
* __ipr_eh_dev_reset - Reset the device * __ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct * @scsi_cmd: scsi command struct
...@@ -5333,12 +5048,9 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, ...@@ -5333,12 +5048,9 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
**/ **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{ {
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg; struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res; struct ipr_resource_entry *res;
struct ata_port *ap; int rc = 0;
int rc = 0, i;
struct ipr_hrr_queue *hrrq;
ENTER; ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
...@@ -5354,35 +5066,9 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) ...@@ -5354,35 +5066,9 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED; return FAILED;
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
if (!ipr_cmd->qc)
continue;
if (ipr_cmnd_is_free(ipr_cmd))
continue;
ipr_cmd->done = ipr_sata_eh_done;
if (!(ipr_cmd->qc->flags & ATA_QCFLAG_EH)) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
ipr_cmd->qc->flags |= ATA_QCFLAG_EH;
}
}
}
spin_unlock(&hrrq->_lock);
}
res->resetting_device = 1; res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
if (ipr_is_gata(res) && res->sata_port) {
ap = res->sata_port->ap;
spin_unlock_irq(scsi_cmd->device->host->host_lock);
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
} else
rc = ipr_device_reset(ioa_cfg, res); rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0; res->resetting_device = 0;
res->reset_occurred = 1; res->reset_occurred = 1;
...@@ -5407,12 +5093,8 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) ...@@ -5407,12 +5093,8 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
rc = __ipr_eh_dev_reset(cmd); rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock); spin_unlock_irq(cmd->device->host->host_lock);
if (rc == SUCCESS) { if (rc == SUCCESS)
if (ipr_is_gata(res) && res->sata_port)
rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
else
rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
}
return rc; return rc;
} }
...@@ -6564,7 +6246,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost, ...@@ -6564,7 +6246,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res; struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb; struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd; struct ipr_cmnd *ipr_cmd;
unsigned long hrrq_flags, lock_flags; unsigned long hrrq_flags;
int rc; int rc;
struct ipr_hrr_queue *hrrq; struct ipr_hrr_queue *hrrq;
int hrrq_id; int hrrq_id;
...@@ -6574,13 +6256,6 @@ static int ipr_queuecommand(struct Scsi_Host *shost, ...@@ -6574,13 +6256,6 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
scsi_cmd->result = (DID_OK << 16); scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata; res = scsi_cmd->device->hostdata;
if (ipr_is_gata(res) && res->sata_port) {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return rc;
}
hrrq_id = ipr_get_hrrq_index(ioa_cfg); hrrq_id = ipr_get_hrrq_index(ioa_cfg);
hrrq = &ioa_cfg->hrrq[hrrq_id]; hrrq = &ioa_cfg->hrrq[hrrq_id];
...@@ -6690,30 +6365,6 @@ static int ipr_queuecommand(struct Scsi_Host *shost, ...@@ -6690,30 +6365,6 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
return 0; return 0;
} }
/**
* ipr_ioctl - IOCTL handler
* @sdev: scsi device struct
* @cmd: IOCTL cmd
* @arg: IOCTL arg
*
* Return value:
* 0 on success / other on failure
**/
static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
struct ipr_resource_entry *res;
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ipr_is_gata(res)) {
if (cmd == HDIO_GET_IDENTITY)
return -ENOTTY;
return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
}
return -EINVAL;
}
/** /**
* ipr_ioa_info - Get information about the card/driver * ipr_ioa_info - Get information about the card/driver
* @host: scsi host struct * @host: scsi host struct
...@@ -6740,12 +6391,7 @@ static const struct scsi_host_template driver_template = { ...@@ -6740,12 +6391,7 @@ static const struct scsi_host_template driver_template = {
.module = THIS_MODULE, .module = THIS_MODULE,
.name = "IPR", .name = "IPR",
.info = ipr_ioa_info, .info = ipr_ioa_info,
.ioctl = ipr_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ipr_ioctl,
#endif
.queuecommand = ipr_queuecommand, .queuecommand = ipr_queuecommand,
.dma_need_drain = ata_scsi_dma_need_drain,
.eh_abort_handler = ipr_eh_abort, .eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset, .eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset, .eh_host_reset_handler = ipr_eh_host_reset,
...@@ -6753,7 +6399,6 @@ static const struct scsi_host_template driver_template = { ...@@ -6753,7 +6399,6 @@ static const struct scsi_host_template driver_template = {
.slave_configure = ipr_slave_configure, .slave_configure = ipr_slave_configure,
.slave_destroy = ipr_slave_destroy, .slave_destroy = ipr_slave_destroy,
.scan_finished = ipr_scan_finished, .scan_finished = ipr_scan_finished,
.target_alloc = ipr_target_alloc,
.target_destroy = ipr_target_destroy, .target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth, .change_queue_depth = ipr_change_queue_depth,
.bios_param = ipr_biosparam, .bios_param = ipr_biosparam,
...@@ -6767,418 +6412,6 @@ static const struct scsi_host_template driver_template = { ...@@ -6767,418 +6412,6 @@ static const struct scsi_host_template driver_template = {
.proc_name = IPR_NAME, .proc_name = IPR_NAME,
}; };
/**
* ipr_ata_phy_reset - libata phy_reset handler
* @ap: ata port to reset
*
**/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
unsigned long flags;
struct ipr_sata_port *sata_port = ap->private_data;
struct ipr_resource_entry *res = sata_port->res;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
int rc;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
goto out_unlock;
rc = ipr_device_reset(ioa_cfg, res);
if (rc) {
ap->link.device[0].class = ATA_DEV_NONE;
goto out_unlock;
}
ap->link.device[0].class = res->ata_class;
if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
ap->link.device[0].class = ATA_DEV_NONE;
out_unlock:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
LEAVE;
}
/**
* ipr_ata_post_internal - Cleanup after an internal command
* @qc: ATA queued command
*
* Return value:
* none
**/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_cmnd *ipr_cmd;
struct ipr_hrr_queue *hrrq;
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
if (ipr_cmd->qc == qc) {
ipr_device_reset(ioa_cfg, sata_port->res);
break;
}
}
spin_unlock(&hrrq->_lock);
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
* ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
* @regs: destination
* @tf: source ATA taskfile
*
* Return value:
* none
**/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
struct ata_taskfile *tf)
{
regs->feature = tf->feature;
regs->nsect = tf->nsect;
regs->lbal = tf->lbal;
regs->lbam = tf->lbam;
regs->lbah = tf->lbah;
regs->device = tf->device;
regs->command = tf->command;
regs->hob_feature = tf->hob_feature;
regs->hob_nsect = tf->hob_nsect;
regs->hob_lbal = tf->hob_lbal;
regs->hob_lbam = tf->hob_lbam;
regs->hob_lbah = tf->hob_lbah;
regs->ctl = tf->ctl;
}
/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	spin_lock(&ipr_cmd->hrrq->_lock);

	/* Save the returned ATA shadow registers on the sata_port so
	 * ipr_qc_fill_rtf() can hand them back to libata.  SIS-64 and
	 * SIS-32 adapters store them at different IOASA offsets. */
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	/* Tell the midlayer if the adapter reset the device on our behalf */
	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	/* Map the IOA sense key / ATA status into a libata error mask */
	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	/* Complete to libata only after the hrrq lock is released */
	ata_qc_complete(qc);
}
/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd: ipr command struct
 * @qc: ATA queued command
 *
 * Builds the 64-bit IOA data descriptor list (IOADL64) describing the
 * command's scatter/gather elements and records the list's length and
 * bus address in the IOARCB.
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	/* Non-data command: no descriptor list needed */
	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	/* The IOADL64 lives inside the command block itself; hand the
	 * adapter its DMA address within that block. */
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));

	/* One descriptor per mapped scatter/gather element */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	/* Mark the final descriptor so the adapter knows where the list ends */
	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd: ipr command struct
 * @qc: ATA queued command
 *
 * Builds the 32-bit IOA data descriptor list (IOADL) for the command's
 * scatter/gather elements and sets the matching transfer-length and
 * list-length fields in the IOARCB for the chosen DMA direction.
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *desc = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_desc = NULL;
	u32 desc_flags = 0;
	int xfer_len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	/* Non-data command: nothing to describe */
	if (xfer_len == 0)
		return;

	switch (qc->dma_dir) {
	case DMA_TO_DEVICE:
		desc_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(xfer_len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		break;
	case DMA_FROM_DEVICE:
		desc_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		break;
	default:
		break;
	}

	/* One descriptor per mapped scatter/gather element */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		desc->flags_and_data_len = cpu_to_be32(desc_flags | sg_dma_len(sg));
		desc->address = cpu_to_be32(sg_dma_address(sg));
		last_desc = desc;
		desc++;
	}

	/* Flag the final descriptor as the end of the list */
	if (last_desc)
		last_desc->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc: queued command
 *
 * Reserve a host command block for @qc ahead of issue and stash it in
 * qc->lldd_task.  If no block is available, defer the command at the
 * link level so libata retries it later.
 *
 * Return value:
 * 	0 if success
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	hrrq = &ioa_cfg->hrrq[ipr_get_hrrq_index(ioa_cfg)];
	qc->lldd_task = NULL;

	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		/* Let the qc through with no command block;
		 * ipr_qc_issue() will fail it. */
		rc = 0;
	} else if (unlikely(!hrrq->allow_cmds)) {
		rc = ATA_DEFER_LINK;
	} else {
		ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
		if (!ipr_cmd)
			rc = ATA_DEFER_LINK;
		else
			qc->lldd_task = ipr_cmd;
	}
	spin_unlock(&hrrq->_lock);

	return rc;
}
/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc: queued command
 *
 * Builds an ATA passthrough IOARCB for @qc (registers, CDB, protocol
 * flags and scatter/gather list) and sends it to the adapter.
 *
 * Return value:
 * 	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	/* A command block is normally reserved by ipr_qc_defer(); try
	 * once more here if that did not happen. */
	if (qc->lldd_task == NULL)
		ipr_qc_defer(qc);

	ipr_cmd = qc->lldd_task;
	if (ipr_cmd == NULL)
		return AC_ERR_SYSTEM;

	qc->lldd_task = NULL;
	spin_lock(&ipr_cmd->hrrq->_lock);

	/* Queue state may have changed since defer; release the block
	 * and fail the qc if the adapter can no longer take commands. */
	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
			ipr_cmd->hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_SYSTEM;
	}

	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	ioarcb = &ipr_cmd->ioarcb;

	/* SIS-64 adapters keep the ATA registers in the command's ioadl
	 * area as additional command parameters; SIS-32 embeds them in
	 * the IOARCB itself. */
	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	/* Build the scatter/gather list in the layout this chip expects */
	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	/* Translate the libata transfer protocol into adapter flags */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	return 0;
}
/**
* ipr_qc_fill_rtf - Read result TF
* @qc: ATA queued command
**/
static void ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioasa_gata *g = &sata_port->ioasa;
struct ata_taskfile *tf = &qc->result_tf;
tf->feature = g->error;
tf->nsect = g->nsect;
tf->lbal = g->lbal;
tf->lbam = g->lbam;
tf->lbah = g->lbah;
tf->device = g->device;
tf->command = g->status;
tf->hob_nsect = g->hob_nsect;
tf->hob_lbal = g->hob_lbal;
tf->hob_lbam = g->hob_lbam;
tf->hob_lbah = g->hob_lbah;
}
/* libata port operations, all implemented via IOA passthrough commands */
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};
/* Transfer capabilities advertised to libata for SATA ports behind the IOA */
static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
	ATA_FLAG_SAS_HOST,
	.pio_mask = ATA_PIO4_ONLY,
	.mwdma_mask = ATA_MWDMA2,
	.udma_mask = ATA_UDMA6,
	.port_ops = &ipr_sata_ops
};
#ifdef CONFIG_PPC_PSERIES #ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = { static const u16 ipr_blocked_processors[] = {
PVR_NORTHSTAR, PVR_NORTHSTAR,
...@@ -10181,7 +9414,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ...@@ -10181,7 +9414,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/libata.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/irq_poll.h> #include <linux/irq_poll.h>
...@@ -35,7 +34,6 @@ ...@@ -35,7 +34,6 @@
* This can be adjusted at runtime through sysfs device attributes. * This can be adjusted at runtime through sysfs device attributes.
*/ */
#define IPR_MAX_CMD_PER_LUN 6 #define IPR_MAX_CMD_PER_LUN 6
#define IPR_MAX_CMD_PER_ATA_LUN 1
/* /*
* IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
...@@ -197,7 +195,6 @@ ...@@ -197,7 +195,6 @@
#define IPR_LUN_RESET 0x40 #define IPR_LUN_RESET 0x40
#define IPR_TARGET_RESET 0x20 #define IPR_TARGET_RESET 0x20
#define IPR_BUS_RESET 0x10 #define IPR_BUS_RESET 0x10
#define IPR_ATA_PHY_RESET 0x80
#define IPR_ID_HOST_RR_Q 0xC4 #define IPR_ID_HOST_RR_Q 0xC4
#define IPR_QUERY_IOA_CONFIG 0xC5 #define IPR_QUERY_IOA_CONFIG 0xC5
#define IPR_CANCEL_ALL_REQUESTS 0xCE #define IPR_CANCEL_ALL_REQUESTS 0xCE
...@@ -521,7 +518,6 @@ struct ipr_cmd_pkt { ...@@ -521,7 +518,6 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_SCSICDB 0x00 #define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01 #define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02 #define IPR_RQTYPE_HCAM 0x02
#define IPR_RQTYPE_ATA_PASSTHRU 0x04
#define IPR_RQTYPE_PIPE 0x05 #define IPR_RQTYPE_PIPE 0x05
u8 reserved2; u8 reserved2;
...@@ -546,30 +542,6 @@ struct ipr_cmd_pkt { ...@@ -546,30 +542,6 @@ struct ipr_cmd_pkt {
__be16 timeout; __be16 timeout;
}__attribute__ ((packed, aligned(4))); }__attribute__ ((packed, aligned(4)));
struct ipr_ioarcb_ata_regs { /* 22 bytes */
u8 flags;
#define IPR_ATA_FLAG_PACKET_CMD 0x80
#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
u8 reserved[3];
__be16 data;
u8 feature;
u8 nsect;
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 command;
u8 reserved2[3];
u8 hob_feature;
u8 hob_nsect;
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
u8 ctl;
}__attribute__ ((packed, aligned(2)));
struct ipr_ioadl_desc { struct ipr_ioadl_desc {
__be32 flags_and_data_len; __be32 flags_and_data_len;
#define IPR_IOADL_FLAGS_MASK 0xff000000 #define IPR_IOADL_FLAGS_MASK 0xff000000
...@@ -591,15 +563,8 @@ struct ipr_ioadl64_desc { ...@@ -591,15 +563,8 @@ struct ipr_ioadl64_desc {
__be64 address; __be64 address;
}__attribute__((packed, aligned (16))); }__attribute__((packed, aligned (16)));
struct ipr_ata64_ioadl {
struct ipr_ioarcb_ata_regs regs;
u16 reserved[5];
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
}__attribute__((packed, aligned (16)));
struct ipr_ioarcb_add_data { struct ipr_ioarcb_add_data {
union { union {
struct ipr_ioarcb_ata_regs regs;
struct ipr_ioadl_desc ioadl[5]; struct ipr_ioadl_desc ioadl[5];
__be32 add_cmd_parms[10]; __be32 add_cmd_parms[10];
} u; } u;
...@@ -665,21 +630,6 @@ struct ipr_ioasa_gpdd { ...@@ -665,21 +630,6 @@ struct ipr_ioasa_gpdd {
__be32 ioa_data[2]; __be32 ioa_data[2];
}__attribute__((packed, aligned (4))); }__attribute__((packed, aligned (4)));
struct ipr_ioasa_gata {
u8 error;
u8 nsect; /* Interrupt reason */
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 status;
u8 alt_status; /* ATA CTL */
u8 hob_nsect;
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
}__attribute__((packed, aligned (4)));
struct ipr_auto_sense { struct ipr_auto_sense {
__be16 auto_sense_len; __be16 auto_sense_len;
__be16 ioa_data_len; __be16 ioa_data_len;
...@@ -713,7 +663,6 @@ struct ipr_ioasa_hdr { ...@@ -713,7 +663,6 @@ struct ipr_ioasa_hdr {
__be32 ioasc_specific; /* status code specific field */ __be32 ioasc_specific; /* status code specific field */
#define IPR_ADDITIONAL_STATUS_FMT 0x80000000 #define IPR_ADDITIONAL_STATUS_FMT 0x80000000
#define IPR_AUTOSENSE_VALID 0x40000000 #define IPR_AUTOSENSE_VALID 0x40000000
#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff #define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
#define IPR_FIELD_POINTER_MASK 0x0000ffff #define IPR_FIELD_POINTER_MASK 0x0000ffff
...@@ -727,7 +676,6 @@ struct ipr_ioasa { ...@@ -727,7 +676,6 @@ struct ipr_ioasa {
struct ipr_ioasa_vset vset; struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd; struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd; struct ipr_ioasa_gpdd gpdd;
struct ipr_ioasa_gata gata;
} u; } u;
struct ipr_auto_sense auto_sense; struct ipr_auto_sense auto_sense;
...@@ -741,7 +689,6 @@ struct ipr_ioasa64 { ...@@ -741,7 +689,6 @@ struct ipr_ioasa64 {
struct ipr_ioasa_vset vset; struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd; struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd; struct ipr_ioasa_gpdd gpdd;
struct ipr_ioasa_gata gata;
} u; } u;
struct ipr_auto_sense auto_sense; struct ipr_auto_sense auto_sense;
...@@ -1279,13 +1226,6 @@ struct ipr_bus_attributes { ...@@ -1279,13 +1226,6 @@ struct ipr_bus_attributes {
u32 max_xfer_rate; u32 max_xfer_rate;
}; };
struct ipr_sata_port {
struct ipr_ioa_cfg *ioa_cfg;
struct ata_port *ap;
struct ipr_resource_entry *res;
struct ipr_ioasa_gata ioasa;
};
struct ipr_resource_entry { struct ipr_resource_entry {
u8 needs_sync_complete:1; u8 needs_sync_complete:1;
u8 in_erp:1; u8 in_erp:1;
...@@ -1323,7 +1263,6 @@ struct ipr_resource_entry { ...@@ -1323,7 +1263,6 @@ struct ipr_resource_entry {
struct ipr_ioa_cfg *ioa_cfg; struct ipr_ioa_cfg *ioa_cfg;
struct scsi_device *sdev; struct scsi_device *sdev;
struct ipr_sata_port *sata_port;
struct list_head queue; struct list_head queue;
}; /* struct ipr_resource_entry */ }; /* struct ipr_resource_entry */
...@@ -1582,7 +1521,6 @@ struct ipr_ioa_cfg { ...@@ -1582,7 +1521,6 @@ struct ipr_ioa_cfg {
struct ipr_cmnd *reset_cmd; struct ipr_cmnd *reset_cmd;
int (*reset) (struct ipr_cmnd *); int (*reset) (struct ipr_cmnd *);
struct ata_host ata_host;
char ipr_cmd_label[8]; char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd" #define IPR_CMD_LABEL "ipr_cmd"
u32 max_cmds; u32 max_cmds;
...@@ -1604,7 +1542,6 @@ struct ipr_cmnd { ...@@ -1604,7 +1542,6 @@ struct ipr_cmnd {
union { union {
struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
struct ipr_ata64_ioadl ata_ioadl;
} i; } i;
union { union {
struct ipr_ioasa ioasa; struct ipr_ioasa ioasa;
...@@ -1612,7 +1549,6 @@ struct ipr_cmnd { ...@@ -1612,7 +1549,6 @@ struct ipr_cmnd {
} s; } s;
struct list_head queue; struct list_head queue;
struct scsi_cmnd *scsi_cmd; struct scsi_cmnd *scsi_cmd;
struct ata_queued_cmd *qc;
struct completion completion; struct completion completion;
struct timer_list timer; struct timer_list timer;
struct work_struct work; struct work_struct work;
......
...@@ -3362,8 +3362,9 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) ...@@ -3362,8 +3362,9 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device; pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status); status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id); device_id = le32_to_cpu(registerRespPayload->device_id);
pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n", pm8001_dbg(pm8001_ha, INIT,
status); "register device status %d phy_id 0x%x device_id %d\n",
status, pm8001_dev->attached_phy, device_id);
switch (status) { switch (status) {
case DEVREG_SUCCESS: case DEVREG_SUCCESS:
pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n"); pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
...@@ -4278,7 +4279,7 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, ...@@ -4278,7 +4279,7 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload)); memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1); payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id); payload.device_id = cpu_to_le32(device_id);
pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n", pm8001_dbg(pm8001_ha, INIT, "unregister device device_id %d\n",
device_id); device_id);
return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
......
...@@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode) ...@@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_ops->ll2->stop(qedi->cdev); qedi_ops->ll2->stop(qedi->cdev);
} }
cancel_delayed_work_sync(&qedi->recovery_work);
cancel_delayed_work_sync(&qedi->board_disable_work);
qedi_free_iscsi_pf_param(qedi); qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false); rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
......
...@@ -5291,6 +5291,26 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt) ...@@ -5291,6 +5291,26 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return SUCCESS; return SUCCESS;
} }
/* blk_mq_tagset_busy_iter() callback: abort @rq's command if it belongs
 * to the scsi_device passed via @data.  Always returns true so the
 * iteration covers every busy request. */
static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
{
	struct scsi_device *sdp = data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	if (scmd->device != sdp)
		return true;

	scsi_debug_abort_cmnd(scmd);
	return true;
}
/* Deletes (stops) timers or work queues of all queued commands per sdev */
static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
{
	struct Scsi_Host *shost = sdp->host;

	/* Walk every busy request on the host's tag set; the iterator
	 * callback filters down to commands targeting @sdp. */
	blk_mq_tagset_busy_iter(&shost->tag_set,
				scsi_debug_stop_all_queued_iter, sdp);
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{ {
struct scsi_device *sdp = SCpnt->device; struct scsi_device *sdp = SCpnt->device;
...@@ -5300,6 +5320,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) ...@@ -5300,6 +5320,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__); sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
scsi_debug_stop_all_queued(sdp);
if (devip) if (devip)
set_bit(SDEBUG_UA_POR, devip->uas_bm); set_bit(SDEBUG_UA_POR, devip->uas_bm);
......
...@@ -299,11 +299,11 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock); ...@@ -299,11 +299,11 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq) struct ufs_hw_queue *hwq)
{ {
unsigned long completed_reqs; unsigned long completed_reqs, flags;
spin_lock(&hwq->cq_lock); spin_lock_irqsave(&hwq->cq_lock, flags);
completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq); completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
spin_unlock(&hwq->cq_lock); spin_unlock_irqrestore(&hwq->cq_lock, flags);
return completed_reqs; return completed_reqs;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment