Commit d31bb16c authored by David Jeffery, committed by Linus Torvalds

[PATCH] ips driver 3/4: 64bit dma addressing

This large patch adds support for 64-bit DMA addressing.

Special thanks go to Mike Anderson, who did the initial
versions of this patch.
parent 836f40cb
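
The heart of the change is the per-adapter DMA-mask negotiation added to ips_init_phase1() further down. As a minimal sketch of that logic (condensed from the hunk below, assuming the 2.4/2.5-era pci_set_dma_mask() interface and the IPS_ENABLE_DMA64 / IPS_HAS_ENH_SGLIST / IPS_HA_ENH_SG helpers introduced in ips.h):

	/* Ask for 64-bit addressing only when the build supports it and the
	 * adapter family has the enhanced (64-bit capable) S/G list format;
	 * otherwise stay with a 32-bit mask. */
	if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
	    !pci_set_dma_mask(ha->pcidev, (u64) 0xffffffffffffffffULL))
		ha->flags |= IPS_HA_ENH_SG;	/* use IPS_ENH_SG_LIST elements */
	else
		pci_set_dma_mask(ha->pcidev, (u64) 0xffffffff);
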
......@@ -461,6 +461,8 @@ static int ips_is_passthru(Scsi_Cmnd *);
static int ips_make_passthru(ips_ha_t *, Scsi_Cmnd *, ips_scb_t *, int);
static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
static void ips_scmd_buf_write(Scsi_Cmnd *scmd, void *data, unsigned int count);
static void ips_scmd_buf_read(Scsi_Cmnd *scmd, void *data, unsigned int count);
int ips_proc_info(char *, char **, off_t, int, int, int);
static int ips_host_info(ips_ha_t *, char *, off_t, int);
......@@ -553,7 +555,7 @@ ips_detect(Scsi_Host_Template *SHT) {
/* If Booting from the Manager CD, Allocate a large Flash */
/* Buffer ( so we won't need to allocate one for each adapter ). */
if ( ips_cd_boot ) {
ips_FlashData = ( char * ) __get_free_pages( GFP_ATOMIC, 7 );
ips_FlashData = ( char * ) __get_free_pages( IPS_INIT_GFP, 7 );
if (ips_FlashData == NULL) {
/* The validity of this pointer is checked in ips_make_passthru() before it is used */
printk( KERN_WARNING "ERROR: Can't Allocate Large Buffer for Flashing\n" );
......@@ -1542,6 +1544,39 @@ ips_is_passthru(Scsi_Cmnd *SC) {
return 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_alloc_passthru_buffer */
/* */
/* Routine Description: */
/* allocate a buffer large enough for the ioctl data if the ioctl buffer */
/* is too small or doesn't exist */
/****************************************************************************/
static int
ips_alloc_passthru_buffer(ips_ha_t *ha, int length){
void *bigger_buf;
int count;
int order;
if(ha->ioctl_data && length <= (PAGE_SIZE << ha->ioctl_order))
return 0;
/* there is no buffer or it's not big enough, allocate a new one */
for (count = PAGE_SIZE, order = 0;
count < length;
order++, count <<= 1);
bigger_buf = (void *) __get_free_pages(IPS_ATOMIC_GFP, order);
if (bigger_buf) {
/* free the old memory */
free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order);
/* use the new memory */
ha->ioctl_data = (char *) bigger_buf;
ha->ioctl_order = order;
} else {
return -1;
}
return 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_make_passthru */
......@@ -1554,73 +1589,41 @@ ips_is_passthru(Scsi_Cmnd *SC) {
static int
ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
ips_passthru_t *pt;
char *buffer;
int length = 0;
METHOD_TRACE("ips_make_passthru", 1);
if(!SC->use_sg){
buffer = SC->request_buffer;
length = SC->request_bufflen;
}else{
struct scatterlist *sg = SC->request_buffer;
int i;
for(i = 0; i < SC->use_sg; i++)
length += sg[i].length;
}
if (length < sizeof(ips_passthru_t)) {
/* wrong size */
DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
ips_name, ha->host_num);
return (IPS_FAILURE);
}else if(!ha->ioctl_data || length > (PAGE_SIZE << ha->ioctl_order)){
void *bigger_buf;
int count;
int order;
/* try to allocate a bigger buffer */
for (count = PAGE_SIZE, order = 0;
count < length;
order++, count <<= 1);
bigger_buf = (void *) __get_free_pages(GFP_ATOMIC, order);
if (bigger_buf) {
/* free the old memory */
free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order);
/* use the new memory */
ha->ioctl_data = (char *) bigger_buf;
ha->ioctl_order = order;
ha->ioctl_datasize = count;
} else {
pt = (ips_passthru_t*)IPS_SG_ADDRESS(sg);
}
if(ips_alloc_passthru_buffer(ha, length)){
/* allocation failure! If ha->ioctl_data exists, use it to return
some error codes. Return a failed command to the scsi layer. */
if(ha->ioctl_data){
pt = (ips_passthru_t *)ha->ioctl_data;
ips_scmd_buf_read(SC, pt, sizeof(ips_passthru_t));
pt->BasicStatus = 0x0B;
pt->ExtendedStatus = 0x00;
SC->result = DID_ERROR << 16;
return (IPS_FAILURE);
ips_scmd_buf_write(SC, pt, sizeof(ips_passthru_t));
}
return IPS_FAILURE;
}
ha->ioctl_datasize = length;
length = 0;
for(i = 0; i < SC->use_sg; i++){
memcpy(&ha->ioctl_data[length], IPS_SG_ADDRESS(&sg[i]), sg[i].length);
length += sg[i].length;
}
pt = (ips_passthru_t *)ha->ioctl_data;
buffer = ha->ioctl_data;
}
if (!length || !buffer) {
/* no data */
DEBUG_VAR(1, "(%s%d) No passthru structure",
ips_name, ha->host_num);
return (IPS_FAILURE);
}
if (length < sizeof(ips_passthru_t)) {
/* wrong size */
DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
ips_name, ha->host_num);
ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
pt = (ips_passthru_t *)ha->ioctl_data;
return (IPS_FAILURE);
}
pt = (ips_passthru_t*) buffer;
/*
* Some notes about the passthru interface used
*
......@@ -1629,26 +1632,14 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
* packet we received from the sg driver. In this
* case the CmdBSize field of the pt structure is
* used for the size of the buffer.
*
* IF the scsi op_code == 0x81 then we assume that
* we will need our own buffer and we will copy the
* data to/from the user buffer passed in the scsi
* command. The data address resides at offset 4
* in the scsi command. The length of the data resides
* at offset 8 in the scsi command.
*/
switch (pt->CoppCmd) {
case IPS_NUMCTRLS:
memcpy(buffer + sizeof(ips_passthru_t),
memcpy(ha->ioctl_data + sizeof(ips_passthru_t),
&ips_num_controllers, sizeof(int));
SC->result = DID_OK << 16;
return (IPS_SUCCESS_IMM);
case IPS_CTRLINFO:
memcpy(buffer + sizeof(ips_passthru_t),
ha, sizeof(ips_ha_t));
ips_scmd_buf_write(SC, ha->ioctl_data,
sizeof(ips_passthru_t) + sizeof(int));
SC->result = DID_OK << 16;
return (IPS_SUCCESS_IMM);
......@@ -1679,7 +1670,7 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
} /* end switch */
return (IPS_FAILURE);
}
}
/****************************************************************************/
/* Routine Name: ips_flash_copperhead */
......@@ -1717,7 +1708,7 @@ ips_flash_copperhead(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb){
pt->CoppCP.cmd.flashfw.count;
for (count = PAGE_SIZE, ha->flash_order = 0; count < datasize;
ha->flash_order++, count <<= 1);
ha->flash_data = (char *)__get_free_pages(GFP_ATOMIC, ha->flash_order);
ha->flash_data = (char *)__get_free_pages(IPS_ATOMIC_GFP, ha->flash_order);
ha->flash_datasize = 0;
}else
return IPS_FAILURE;
......@@ -1793,6 +1784,48 @@ ips_flash_bios(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){
return IPS_FAILURE;
}
/****************************************************************************/
/* */
/* Routine Name: ips_fill_scb_sg_single */
/* */
/* Routine Description: */
/* Fill in a single scb sg_list element from an address */
/* return -1 if a breakup occurred */
/****************************************************************************/
static inline int ips_fill_scb_sg_single(ips_ha_t *ha, dma_addr_t busaddr,
ips_scb_t *scb, int indx, unsigned int e_len)
{
int ret_val = 0;
if ( (scb->data_len + e_len) > ha->max_xfer) {
e_len = ha->max_xfer - scb->data_len;
scb->breakup = indx;
++scb->sg_break;
ret_val = -1;
} else {
scb->breakup = 0;
scb->sg_break = 0;
}
if (IPS_USE_ENH_SGLIST(ha)) {
scb->sg_list.enh_list[indx].address_lo =
cpu_to_le32(pci_dma_lo32(busaddr));
scb->sg_list.enh_list[indx].address_hi =
cpu_to_le32(pci_dma_hi32(busaddr));
scb->sg_list.enh_list[indx].length =
cpu_to_le32(e_len);
} else {
scb->sg_list.std_list[indx].address =
cpu_to_le32(pci_dma_lo32(busaddr));
scb->sg_list.std_list[indx].length =
cpu_to_le32(e_len);
}
++scb->sg_len;
scb->data_len += e_len;
return ret_val;
}
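/*
 * Minimal usage sketch (not part of the patch): after pci_map_sg(), walk
 * the mapped scatterlist and let the helper fill S/G elements until it
 * reports a breakup; "sg" and "i" are assumed locals, as in ips_next().
 */
	for (i = 0; i < scb->sg_count; i++) {
		if (ips_fill_scb_sg_single(ha, sg_dma_address(&sg[i]),
					   scb, i, sg_dma_len(&sg[i])) < 0)
			break;	/* ha->max_xfer reached; the rest is issued from ips_done() */
	}
	scb->dcdb.transfer_length = scb->data_len;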
/****************************************************************************/
/* Routine Name: ips_flash_firmware */
/* Routine Description: */
......@@ -1800,7 +1833,7 @@ ips_flash_bios(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){
/****************************************************************************/
static int
ips_flash_firmware(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){
IPS_SG_LIST *sg_list;
IPS_SG_LIST sg_list;
uint32_t cmd_busaddr;
if(pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
......@@ -1815,12 +1848,12 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){
return IPS_FAILURE;
}
/* Save the S/G list pointer so it doesn't get clobbered */
sg_list = scb->sg_list;
sg_list.list = scb->sg_list.list;
cmd_busaddr = scb->scb_busaddr;
/* copy in the CP */
memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof(IPS_IOCTL_CMD));
/* FIX stuff that might be wrong */
scb->sg_list = sg_list;
scb->sg_list.list = sg_list.list;
scb->scb_busaddr = cmd_busaddr;
scb->bus = scb->scsi_cmd->device->channel;
scb->target_id = scb->scsi_cmd->device->id;
......@@ -1837,7 +1870,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){
IPS_DMA_DIR(scb));
scb->flags |= IPS_SCB_MAP_SINGLE;
scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
scb->cmd.flashfw.buffer_addr = scb->data_busaddr;
scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
if (pt->TimeOut)
scb->timeout = pt->TimeOut;
scb->scsi_cmd->result = DID_OK <<16;
......@@ -1869,7 +1902,7 @@ ips_free_flash_copperhead(ips_ha_t *ha){
/****************************************************************************/
static int
ips_usrcmd(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb) {
IPS_SG_LIST *sg_list;
IPS_SG_LIST sg_list;
uint32_t cmd_busaddr;
METHOD_TRACE("ips_usrcmd", 1);
......@@ -1878,14 +1911,14 @@ ips_usrcmd(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb) {
return (0);
/* Save the S/G list pointer so it doesn't get clobbered */
sg_list = scb->sg_list;
sg_list.list = scb->sg_list.list;
cmd_busaddr = scb->scb_busaddr;
/* copy in the CP */
memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof(IPS_IOCTL_CMD));
memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof(IPS_DCDB_TABLE));
/* FIX stuff that might be wrong */
scb->sg_list = sg_list;
scb->sg_list.list = sg_list.list;
scb->scb_busaddr = cmd_busaddr;
scb->bus = scb->scsi_cmd->device->channel;
scb->target_id = scb->scsi_cmd->device->id;
......@@ -1905,15 +1938,6 @@ ips_usrcmd(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb) {
return (0);
if (pt->CmdBSize) {
if(!scb->scsi_cmd->use_sg){
scb->data_len = pt->CmdBSize;
scb->data_busaddr = pci_map_single(ha->pcidev,
scb->scsi_cmd->request_buffer +
sizeof(ips_passthru_t),
pt->CmdBSize,
IPS_DMA_DIR(scb));
scb->flags |= IPS_SCB_MAP_SINGLE;
} else {
scb->data_len = pt->CmdBSize;
scb->data_busaddr = pci_map_single(ha->pcidev,
ha->ioctl_data +
......@@ -1921,7 +1945,6 @@ ips_usrcmd(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb) {
pt->CmdBSize,
IPS_DMA_DIR(scb));
scb->flags |= IPS_SCB_MAP_SINGLE;
}
} else {
scb->data_busaddr = 0L;
}
......@@ -1978,9 +2001,6 @@ ips_cleanup_passthru(ips_ha_t *ha, ips_scb_t *scb) {
return ;
}
if(!scb->scsi_cmd->use_sg)
pt = (ips_passthru_t *) scb->scsi_cmd->request_buffer;
else
pt = (ips_passthru_t *) ha->ioctl_data;
/* Copy data back to the user */
......@@ -1996,14 +2016,7 @@ ips_cleanup_passthru(ips_ha_t *ha, ips_scb_t *scb) {
scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
ips_free_flash_copperhead(ha);
if(scb->scsi_cmd->use_sg){
int i, length = 0;
struct scatterlist *sg = scb->scsi_cmd->request_buffer;
for(i = 0; i < scb->scsi_cmd->use_sg; i++){
memcpy(IPS_SG_ADDRESS(&sg[i]), &ha->ioctl_data[length], sg[i].length);
length += sg[i].length;
}
}
ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
}
/****************************************************************************/
......@@ -2331,7 +2344,7 @@ ips_get_bios_version(ips_ha_t *ha, int intr) {
} else {
/* Morpheus Family - Send Command to the card */
buffer = kmalloc(0x1000, GFP_ATOMIC);
buffer = kmalloc(0x1000, IPS_ATOMIC_GFP);
if (!buffer)
return;
......@@ -2685,68 +2698,20 @@ ips_next(ips_ha_t *ha, int intr) {
scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
scsi_to_pci_dma_dir(SC->sc_data_direction));
scb->flags |= IPS_SCB_MAP_SG;
if (scb->sg_count == 1) {
if (sg_dma_len(sg) > ha->max_xfer) {
scb->breakup = 1;
scb->data_len = ha->max_xfer;
} else
scb->data_len = sg_dma_len(sg);
scb->dcdb.transfer_length = scb->data_len;
scb->data_busaddr = sg_dma_address(sg);
scb->sg_len = 0;
} else {
/* Check for the first Element being bigger than MAX_XFER */
if (sg_dma_len(&sg[0]) > ha->max_xfer) {
scb->sg_list[0].address = cpu_to_le32(sg_dma_address(&sg[0]));
scb->sg_list[0].length = ha->max_xfer;
scb->data_len = ha->max_xfer;
scb->breakup = 0;
scb->sg_break=1;
scb->sg_len = 1;
} else {
for (i = 0; i < scb->sg_count; i++) {
scb->sg_list[i].address = cpu_to_le32(sg_dma_address(&sg[i]));
scb->sg_list[i].length = cpu_to_le32(sg_dma_len(&sg[i]));
if (scb->data_len + sg_dma_len(&sg[i]) > ha->max_xfer) {
/*
* Data Breakup required
*/
scb->breakup = i;
if ( ips_fill_scb_sg_single(ha, sg_dma_address(&sg[i]),
scb, i, sg_dma_len(&sg[i])) < 0)
break;
}
scb->data_len += sg_dma_len(&sg[i]);
}
if (!scb->breakup)
scb->sg_len = scb->sg_count;
else
scb->sg_len = scb->breakup;
}
scb->dcdb.transfer_length = scb->data_len;
scb->data_busaddr = scb->sg_busaddr;
}
} else {
if (SC->request_bufflen) {
if (SC->request_bufflen > ha->max_xfer) {
/*
* Data breakup required
*/
scb->breakup = 1;
scb->data_len = ha->max_xfer;
} else {
scb->data_len = SC->request_bufflen;
}
scb->dcdb.transfer_length = scb->data_len;
scb->data_busaddr = pci_map_single(ha->pcidev, SC->request_buffer,
scb->data_len,
SC->request_bufflen,
scsi_to_pci_dma_dir(SC->sc_data_direction));
scb->flags |= IPS_SCB_MAP_SINGLE;
scb->sg_len = 0;
ips_fill_scb_sg_single(ha, scb->data_busaddr, scb, 0, SC->request_bufflen);
scb->dcdb.transfer_length = scb->data_len;
} else {
scb->data_busaddr = 0L;
scb->sg_len = 0;
......@@ -3299,118 +3264,41 @@ ips_done(ips_ha_t *ha, ips_scb_t *scb) {
*/
if ((scb->breakup) || (scb->sg_break)) {
/* we had a data breakup */
uint8_t bk_save;
bk_save = scb->breakup;
scb->breakup = 0;
mod_timer(&scb->scsi_cmd->eh_timeout, jiffies + 120 * HZ);
scb->data_len = 0;
if (scb->sg_count) {
/* S/G request */
struct scatterlist *sg;
int i;
int ips_sg_index = 0;
int sg_dma_index;
sg = scb->scsi_cmd->request_buffer;
if (scb->sg_count == 1) {
if (sg_dma_len(sg) - (bk_save * ha->max_xfer) > ha->max_xfer) {
/* Further breakup required */
scb->data_len = ha->max_xfer;
scb->data_busaddr = sg_dma_address(sg) + (bk_save * ha->max_xfer);
scb->breakup = bk_save + 1;
} else {
scb->data_len = sg_dma_len(sg) - (bk_save * ha->max_xfer);
scb->data_busaddr = sg_dma_address(sg) + (bk_save * ha->max_xfer);
}
/* Spin forward to last dma chunk */
sg_dma_index = scb->breakup;
scb->dcdb.transfer_length = scb->data_len;
scb->sg_len = 0;
} else {
/* We're here because there was MORE than one s/g unit. */
/* bk_save points to which sg unit to look at */
/* sg_break points to how far through this unit we are */
/* NOTE: We will not move from one sg to another here, */
/* just finish the one we are in. Not the most */
/* efficient, but it keeps it from getting too hacky */
/* IF sg_break is non-zero, then just work on this current sg piece, */
/* pointed to by bk_save */
if (scb->sg_break) {
scb->sg_len = 1;
scb->sg_list[0].address = sg_dma_address(&sg[bk_save])
+ ha->max_xfer*scb->sg_break;
if (ha->max_xfer > sg_dma_len(&sg[bk_save]) - ha->max_xfer * scb->sg_break)
scb->sg_list[0].length = sg_dma_len(&sg[bk_save]) - ha->max_xfer * scb->sg_break;
else
scb->sg_list[0].length = ha->max_xfer;
scb->sg_break++; /* MUST GO HERE for math below to work */
scb->data_len = scb->sg_list[0].length;;
/* Take care of possible partial on last chunk*/
ips_fill_scb_sg_single(ha, sg_dma_address(&sg[sg_dma_index]),
scb, ips_sg_index++,
sg_dma_len(&sg[sg_dma_index]));
if (sg_dma_len(&sg[bk_save]) <= ha->max_xfer * scb->sg_break ) {
scb->sg_break = 0; /* No more work in this unit */
if (( bk_save + 1 ) >= scb->sg_count)
scb->breakup = 0;
else
scb->breakup = bk_save + 1;
}
} else {
/* ( sg_break == 0 ), so this is our first look at a new sg piece */
if (sg_dma_len(&sg[bk_save]) > ha->max_xfer) {
scb->sg_list[0].address = sg_dma_address(&sg[bk_save]);
scb->sg_list[0].length = ha->max_xfer;
scb->breakup = bk_save;
scb->sg_break = 1;
scb->data_len = ha->max_xfer;
scb->sg_len = 1;
} else {
/* OK, the next sg is a short one, so loop until full */
scb->data_len = 0;
scb->sg_len = 0;
scb->sg_break = 0;
/* We're only doing full units here */
for (i = bk_save; i < scb->sg_count; i++) {
scb->sg_list[i - bk_save].address = sg_dma_address(&sg[i]);
scb->sg_list[i - bk_save].length = cpu_to_le32(sg_dma_len(&sg[i]));
if (scb->data_len + sg_dma_len(&sg[i]) > ha->max_xfer) {
scb->breakup = i; /* sneaky, if not more work, than breakup is 0 */
for (; sg_dma_index < scb->sg_count; sg_dma_index++) {
if ( ips_fill_scb_sg_single(ha, sg_dma_address(&sg[sg_dma_index]),
scb, ips_sg_index++,
sg_dma_len(&sg[sg_dma_index])) < 0)
break;
}
scb->data_len += sg_dma_len(&sg[i]);
scb->sg_len++; /* only if we didn't get too big */
}
}
}
/* Also, we need to be sure we don't queue work ( breakup != 0 )
if no more sg units for next time */
scb->dcdb.transfer_length = scb->data_len;
scb->data_busaddr = scb->sg_busaddr;
}
} else {
/* Non S/G Request */
pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
IPS_DMA_DIR(scb));
if ((scb->scsi_cmd->request_bufflen - (bk_save * ha->max_xfer)) > ha->max_xfer) {
/* Further breakup required */
scb->data_len = ha->max_xfer;
scb->data_busaddr = pci_map_single(ha->pcidev,
scb->scsi_cmd->request_buffer +
(bk_save * ha->max_xfer),
scb->data_len, IPS_DMA_DIR(scb));
scb->breakup = bk_save + 1;
} else {
scb->data_len = scb->scsi_cmd->request_bufflen - (bk_save * ha->max_xfer);
scb->data_busaddr = pci_map_single(ha->pcidev,
scb->scsi_cmd->request_buffer +
(bk_save * ha->max_xfer),
scb->data_len, IPS_DMA_DIR(scb));
(void) ips_fill_scb_sg_single(ha,
scb->data_busaddr + (scb->sg_break * ha->max_xfer),
scb, 0,
scb->scsi_cmd->request_bufflen - (scb->sg_break * ha->max_xfer));
}
scb->dcdb.transfer_length = scb->data_len;
scb->sg_len = 0;
}
scb->dcdb.cmd_attribute |= ips_command_direction[scb->scsi_cmd->cmnd[0]];
if (!scb->dcdb.cmd_attribute & 0x3)
......@@ -3614,6 +3502,68 @@ ips_send_wait(ips_ha_t *ha, ips_scb_t *scb, int timeout, int intr) {
return (ret);
}
/****************************************************************************/
/* */
/* Routine Name: ips_scmd_buf_write */
/* */
/* Routine Description: */
/* Write data to Scsi_Cmnd request_buffer at proper offsets */
/****************************************************************************/
static void ips_scmd_buf_write(Scsi_Cmnd *scmd, void *data, unsigned
int count)
{
if (scmd->use_sg) {
int i;
unsigned int min_cnt, xfer_cnt;
char *cdata = (char *)data;
struct scatterlist *sg = scmd->request_buffer;
for (i = 0, xfer_cnt = 0;
(i < scmd->use_sg) && (xfer_cnt < count); i++){
if(!IPS_SG_ADDRESS(&sg[i]))
return;
min_cnt = min( count - xfer_cnt, sg[i].length);
memcpy(IPS_SG_ADDRESS(&sg[i]), &cdata[xfer_cnt],
min_cnt);
xfer_cnt += min_cnt;
}
} else {
unsigned int min_cnt = min(count, scmd->request_bufflen);
memcpy(scmd->request_buffer, data, min_cnt);
}
}
/****************************************************************************/
/* */
/* Routine Name: ips_scmd_buf_read */
/* */
/* Routine Description: */
/* Copy data from a Scsi_Cmnd to a new, linear buffer */
/****************************************************************************/
static void ips_scmd_buf_read(Scsi_Cmnd *scmd, void *data, unsigned
int count)
{
if (scmd->use_sg) {
int i;
unsigned int min_cnt, xfer_cnt;
char *cdata = (char *)data;
struct scatterlist *sg = scmd->request_buffer;
for (i = 0, xfer_cnt = 0;
(i < scmd->use_sg) && (xfer_cnt < count); i++){
if(!IPS_SG_ADDRESS(&sg[i]))
return;
min_cnt = min( count - xfer_cnt, sg[i].length);
memcpy(&cdata[xfer_cnt],IPS_SG_ADDRESS(&sg[i]),
min_cnt);
xfer_cnt += min_cnt;
}
} else {
unsigned int min_cnt = min(count, scmd->request_bufflen);
memcpy(data, scmd->request_buffer, min_cnt);
}
}
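/*
 * Usage sketch (not part of the patch): together the two helpers give the
 * driver one linear staging buffer no matter how the midlayer delivered
 * the data ("SC" is an assumed Scsi_Cmnd pointer).  ips_make_passthru()
 * pulls the ioctl payload in with ips_scmd_buf_read(), and
 * ips_cleanup_passthru() copies the results back with ips_scmd_buf_write():
 */
	ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
	/* ... build and issue the passthru command ... */
	ips_scmd_buf_write(SC, ha->ioctl_data, ha->ioctl_datasize);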
/****************************************************************************/
/* */
/* Routine Name: ips_send_cmd */
......@@ -3690,7 +3640,7 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
strncpy(inquiry.ProductId, "SERVERAID ", 16);
strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
memcpy(scb->scsi_cmd->request_buffer, &inquiry, scb->scsi_cmd->request_bufflen);
ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof(inquiry));
scb->scsi_cmd->result = DID_OK << 16;
}
......@@ -3720,15 +3670,19 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
if (!scb->sg_len) {
scb->cmd.basic_io.op_code =
(scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
scb->cmd.basic_io.enhanced_sg = 0;
scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr);
} else {
scb->cmd.basic_io.op_code =
(scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ_SG : IPS_CMD_WRITE_SG;
scb->cmd.basic_io.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->sg_busaddr);
}
scb->cmd.basic_io.segment_4G = 0;
scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
scb->cmd.basic_io.log_drv = scb->target_id;
scb->cmd.basic_io.sg_count = scb->sg_len;
scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr);
if (scb->cmd.basic_io.lba)
scb->cmd.basic_io.lba = cpu_to_le32(le32_to_cpu(scb->cmd.basic_io.lba) +
......@@ -3743,7 +3697,6 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
scb->cmd.basic_io.sector_count = cpu_to_le16(256);
scb->cmd.basic_io.reserved = 0;
ret = IPS_SUCCESS;
break;
......@@ -3752,15 +3705,19 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
if (!scb->sg_len) {
scb->cmd.basic_io.op_code =
(scb->scsi_cmd->cmnd[0] == READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
scb->cmd.basic_io.enhanced_sg = 0;
scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr);
} else {
scb->cmd.basic_io.op_code =
(scb->scsi_cmd->cmnd[0] == READ_10) ? IPS_CMD_READ_SG : IPS_CMD_WRITE_SG;
scb->cmd.basic_io.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->sg_busaddr);
}
scb->cmd.basic_io.segment_4G = 0;
scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
scb->cmd.basic_io.log_drv = scb->target_id;
scb->cmd.basic_io.sg_count = scb->sg_len;
scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr);
if (scb->cmd.basic_io.lba)
scb->cmd.basic_io.lba = cpu_to_le32(le32_to_cpu(scb->cmd.basic_io.lba) +
......@@ -3773,7 +3730,6 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE);
scb->cmd.basic_io.reserved = 0;
if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) {
/*
......@@ -3795,6 +3751,8 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
case MODE_SENSE:
scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
scb->cmd.basic_io.segment_4G = 0;
scb->cmd.basic_io.enhanced_sg = 0;
scb->data_len = sizeof(*ha->enq);
scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq,
scb->data_len, IPS_DMA_DIR(scb));
......@@ -3853,10 +3811,6 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
/* setup DCDB */
if (scb->bus > 0) {
if (!scb->sg_len)
scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
else
scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
/* If we already know the Device is Not there, no need to attempt a Command */
/* This also protects an NT FailOver Controller from getting CDB's sent to it */
......@@ -3873,14 +3827,18 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
scb->cmd.dcdb.reserved = 0;
scb->cmd.dcdb.reserved2 = 0;
scb->cmd.dcdb.reserved3 = 0;
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
TimeOut = scb->scsi_cmd->timeout_per_command;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len)
if (!scb->sg_len) {
scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
else
} else {
scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB_SG;
scb->cmd.dcdb.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
}
tapeDCDB = (IPS_DCDB_TABLE_TAPE *) &scb->dcdb; /* Use Same Data Area as Old DCDB Struct */
tapeDCDB->device_address = ((scb->bus - 1) << 4) | scb->target_id;
......@@ -3899,6 +3857,9 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
tapeDCDB->reserved_for_LUN = 0;
tapeDCDB->transfer_length = scb->data_len;
if(scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
tapeDCDB->buffer_pointer = cpu_to_le32(scb->sg_busaddr);
else
tapeDCDB->buffer_pointer = cpu_to_le32(scb->data_busaddr);
tapeDCDB->sg_count = scb->sg_len;
tapeDCDB->sense_length = sizeof(tapeDCDB->sense_info);
......@@ -3906,6 +3867,13 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
tapeDCDB->reserved = 0;
memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len);
} else {
if (!scb->sg_len) {
scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
} else {
scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
scb->cmd.dcdb.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
}
scb->dcdb.device_address = ((scb->bus - 1) << 4) | scb->target_id;
scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
......@@ -3921,6 +3889,9 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) {
scb->dcdb.transfer_length = scb->data_len;
if ( scb->dcdb.cmd_attribute & IPS_TRANSFER64K )
scb->dcdb.transfer_length = 0;
if(scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
scb->dcdb.buffer_pointer = cpu_to_le32(scb->sg_busaddr);
else
scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr);
scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
scb->dcdb.sense_length = sizeof(scb->dcdb.sense_info);
......@@ -4146,7 +4117,7 @@ ips_inquiry(ips_ha_t *ha, ips_scb_t *scb) {
strncpy(inquiry.ProductId, "SERVERAID ", 16);
strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
memcpy(scb->scsi_cmd->request_buffer, &inquiry, scb->scsi_cmd->request_bufflen);
ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof(inquiry));
return (1);
}
......@@ -4162,17 +4133,17 @@ ips_inquiry(ips_ha_t *ha, ips_scb_t *scb) {
/****************************************************************************/
static int
ips_rdcap(ips_ha_t *ha, ips_scb_t *scb) {
IPS_SCSI_CAPACITY *cap;
IPS_SCSI_CAPACITY cap;
METHOD_TRACE("ips_rdcap", 1);
if (scb->scsi_cmd->bufflen < 8)
return (0);
cap = (IPS_SCSI_CAPACITY *) scb->scsi_cmd->request_buffer;
cap.lba = cpu_to_be32(le32_to_cpu(ha->adapt->logical_drive_info.drive_info[scb->target_id].sector_count) - 1);
cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
cap->lba = cpu_to_be32(le32_to_cpu(ha->adapt->logical_drive_info.drive_info[scb->target_id].sector_count) - 1);
cap->len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof(cap));
return (1);
}
......@@ -4250,7 +4221,7 @@ ips_msense(ips_ha_t *ha, ips_scb_t *scb) {
return (0);
} /* end switch */
memcpy(scb->scsi_cmd->request_buffer, &mdata, scb->scsi_cmd->request_bufflen);
ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof(mdata));
return (1);
}
......@@ -4277,7 +4248,7 @@ ips_reqsen(ips_ha_t *ha, ips_scb_t *scb) {
reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
memcpy(scb->scsi_cmd->request_buffer, &reqsen, scb->scsi_cmd->request_bufflen);
ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof(reqsen));
return (1);
}
......@@ -4356,8 +4327,8 @@ ips_free(ips_ha_t *ha) {
static int
ips_deallocatescbs(ips_ha_t *ha, int cmds) {
if (ha->scbs) {
pci_free_consistent(ha->pcidev,sizeof(IPS_SG_LIST) * IPS_MAX_SG *
cmds, ha->scbs->sg_list, ha->scbs->sg_busaddr);
pci_free_consistent(ha->pcidev, IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
cmds, ha->scbs->sg_list.list, ha->scbs->sg_busaddr);
pci_free_consistent(ha->pcidev, sizeof(ips_scb_t) * cmds,
ha->scbs, ha->scbs->scb_busaddr);
ha->scbs = NULL;
......@@ -4377,7 +4348,7 @@ return 1;
static int
ips_allocatescbs(ips_ha_t *ha) {
ips_scb_t *scb_p;
IPS_SG_LIST* ips_sg;
IPS_SG_LIST ips_sg;
int i;
dma_addr_t command_dma, sg_dma;
......@@ -4388,9 +4359,9 @@ ips_allocatescbs(ips_ha_t *ha) {
&command_dma);
if (ha->scbs == NULL)
return 0;
ips_sg = pci_alloc_consistent(ha->pcidev, sizeof(IPS_SG_LIST) * IPS_MAX_SG *
ips_sg.list = pci_alloc_consistent(ha->pcidev, IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
ha->max_cmds, &sg_dma);
if(ips_sg == NULL){
if(ips_sg.list == NULL){
pci_free_consistent(ha->pcidev,ha->max_cmds * sizeof(ips_scb_t),ha->scbs, command_dma);
return 0;
}
......@@ -4401,8 +4372,13 @@ ips_allocatescbs(ips_ha_t *ha) {
scb_p = &ha->scbs[i];
scb_p->scb_busaddr = command_dma + sizeof(ips_scb_t) * i;
/* set up S/G list */
scb_p->sg_list = ips_sg + i * IPS_MAX_SG;
scb_p->sg_busaddr = sg_dma + sizeof(IPS_SG_LIST) * IPS_MAX_SG * i;
if (IPS_USE_ENH_SGLIST(ha)) {
scb_p->sg_list.enh_list = ips_sg.enh_list + i * IPS_MAX_SG;
scb_p->sg_busaddr = sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
} else {
scb_p->sg_list.std_list = ips_sg.std_list + i * IPS_MAX_SG;
scb_p->sg_busaddr = sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
}
/* add to the free list */
if (i < ha->max_cmds - 1) {
......@@ -4426,14 +4402,14 @@ ips_allocatescbs(ips_ha_t *ha) {
/****************************************************************************/
static void
ips_init_scb(ips_ha_t *ha, ips_scb_t *scb) {
IPS_SG_LIST *sg_list;
IPS_SG_LIST sg_list;
uint32_t cmd_busaddr, sg_busaddr;
METHOD_TRACE("ips_init_scb", 1);
if (scb == NULL)
return ;
sg_list = scb->sg_list;
sg_list.list = scb->sg_list.list;
cmd_busaddr = scb->scb_busaddr;
sg_busaddr = scb->sg_busaddr;
/* zero fill */
......@@ -4449,7 +4425,7 @@ ips_init_scb(ips_ha_t *ha, ips_scb_t *scb) {
/* set bus address of scb */
scb->scb_busaddr = cmd_busaddr;
scb->sg_busaddr = sg_busaddr;
scb->sg_list = sg_list;
scb->sg_list.list = sg_list.list;
/* Neptune Fix */
scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
......@@ -4503,8 +4479,7 @@ ips_freescb(ips_ha_t *ha, ips_scb_t *scb) {
METHOD_TRACE("ips_freescb", 1);
if(scb->flags & IPS_SCB_MAP_SG)
pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
scb->scsi_cmd->use_sg,
pci_unmap_sg(ha->pcidev,scb->scsi_cmd->request_buffer, scb->scsi_cmd->use_sg,
IPS_DMA_DIR(scb));
else if(scb->flags & IPS_SCB_MAP_SINGLE)
pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
......@@ -5620,7 +5595,6 @@ ips_read_adapter_status(ips_ha_t *ha, int intr) {
scb->cmd.basic_io.lba = 0;
scb->cmd.basic_io.sector_count = 0;
scb->cmd.basic_io.log_drv = 0;
scb->cmd.basic_io.reserved = 0;
scb->data_len = sizeof(*ha->enq);
scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, scb->data_len,
IPS_DMA_DIR(scb));
......@@ -5665,7 +5639,6 @@ ips_read_subsystem_parameters(ips_ha_t *ha, int intr) {
scb->cmd.basic_io.lba = 0;
scb->cmd.basic_io.sector_count = 0;
scb->cmd.basic_io.log_drv = 0;
scb->cmd.basic_io.reserved = 0;
scb->data_len = sizeof(*ha->subsys);
scb->data_busaddr = pci_map_single(ha->pcidev, ha->subsys,
scb->data_len, IPS_DMA_DIR(scb));
......@@ -6992,9 +6965,20 @@ static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr )
ha->subdevice_id = subdevice_id;
ha->pcidev = pci_dev;
/*
* Set the pci_dev's dma_mask. Not all adapters support 64bit
* addressing so don't enable it if the adapter can't support
* it! Also, don't use 64bit addressing if dma addresses
* are guaranteed to be < 4G.
*/
if ( IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
!pci_set_dma_mask(ha->pcidev, (u64)0xffffffffffffffff)) {
(ha)->flags |= IPS_HA_ENH_SG;
} else {
pci_set_dma_mask(ha->pcidev, (u64)0xffffffff);
}
ha->enq = kmalloc(sizeof(IPS_ENQ), GFP_KERNEL);
ha->enq = kmalloc(sizeof(IPS_ENQ), IPS_INIT_GFP);
if (!ha->enq) {
printk(KERN_WARNING "Unable to allocate host inquiry structure\n" );
......@@ -7010,21 +6994,21 @@ static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr )
ha->adapt->hw_status_start = dma_address;
ha->dummy = (void *)(ha->adapt + 1);
ha->conf = kmalloc(sizeof(IPS_CONF), GFP_KERNEL);
ha->conf = kmalloc(sizeof(IPS_CONF), IPS_INIT_GFP);
if (!ha->conf) {
printk(KERN_WARNING "Unable to allocate host conf structure\n" );
return ips_abort_init(ha, index);
}
ha->nvram = kmalloc(sizeof(IPS_NVRAM_P5), GFP_KERNEL);
ha->nvram = kmalloc(sizeof(IPS_NVRAM_P5), IPS_INIT_GFP);
if (!ha->nvram) {
printk(KERN_WARNING "Unable to allocate host NVRAM structure\n" );
return ips_abort_init(ha, index);
}
ha->subsys = kmalloc(sizeof(IPS_SUBSYS), GFP_KERNEL);
ha->subsys = kmalloc(sizeof(IPS_SUBSYS), IPS_INIT_GFP);
if (!ha->subsys) {
printk(KERN_WARNING "Unable to allocate host subsystem structure\n" );
......@@ -7035,7 +7019,7 @@ static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr )
count < ips_ioctlsize;
ha->ioctl_order++, count <<= 1);
ha->ioctl_data = (char *) __get_free_pages(GFP_KERNEL, ha->ioctl_order);
ha->ioctl_data = (char *) __get_free_pages(IPS_INIT_GFP, ha->ioctl_order);
ha->ioctl_datasize = count;
if (!ha->ioctl_data) {
......
......@@ -59,9 +59,7 @@
extern int ips_eh_abort(Scsi_Cmnd *);
extern int ips_eh_reset(Scsi_Cmnd *);
extern int ips_queue(Scsi_Cmnd *, void (*) (Scsi_Cmnd *));
extern int ips_slave_configure(Scsi_Device *);
extern const char * ips_info(struct Scsi_Host *);
extern void do_ips(int, void *, struct pt_regs *);
/*
* Some handy macros
......@@ -70,6 +68,13 @@
#define LinuxVersionCode(x,y,z) (((x)<<16)+((y)<<8)+(z))
#endif
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,20) || defined CONFIG_HIGHIO
#define IPS_HIGHIO
#define IPS_HIGHMEM_IO .highmem_io = 1,
#else
#define IPS_HIGHMEM_IO
#endif
#define IPS_HA(x) ((ips_ha_t *) x->hostdata)
#define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
#define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
......@@ -87,6 +92,11 @@
((IPS_IS_TROMBONE(ha) || IPS_IS_CLARINET(ha)) && \
(ips_force_memio))) ? 1 : 0)
#define IPS_HAS_ENH_SGLIST(ha) (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha))
#define IPS_USE_ENH_SGLIST(ha) ((ha)->flags & IPS_HA_ENH_SG)
#define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \
sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST))
#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,4)
#define pci_set_dma_mask(dev,mask) (1)
#define scsi_set_pci_device(sh,dev) (0)
......@@ -105,6 +115,24 @@
#define min(x,y) ((x) < (y) ? x : y)
#endif
#define pci_dma_lo32(a) (a & 0xffffffff)
#if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO)
#define IPS_ENABLE_DMA64 (1)
#define pci_dma_hi32(a) (a >> 32)
#else
#define IPS_ENABLE_DMA64 (0)
#define pci_dma_hi32(a) (0)
#endif
#if defined(__ia64__)
#define IPS_ATOMIC_GFP (GFP_DMA | GFP_ATOMIC)
#define IPS_INIT_GFP GFP_DMA
#else
#define IPS_ATOMIC_GFP GFP_ATOMIC
#define IPS_INIT_GFP GFP_KERNEL
#endif
/*
* Adapter address map equates
*/
......@@ -458,7 +486,7 @@
#endif
/*
* IBM PCI Raid Command Formats
* Raid Command Formats
*/
typedef struct {
uint8_t op_code;
......@@ -468,7 +496,8 @@ typedef struct {
uint32_t lba;
uint32_t sg_addr;
uint16_t sector_count;
uint16_t reserved;
uint8_t segment_4G;
uint8_t enhanced_sg;
uint32_t ccsar;
uint32_t cccr;
} IPS_IO_CMD, *PIPS_IO_CMD;
......@@ -519,7 +548,9 @@ typedef struct {
uint16_t reserved;
uint32_t reserved2;
uint32_t dcdb_address;
uint32_t reserved3;
uint16_t reserved3;
uint8_t segment_4G;
uint8_t enhanced_sg;
uint32_t ccsar;
uint32_t cccr;
} IPS_DCDB_CMD, *PIPS_DCDB_CMD;
......@@ -963,7 +994,20 @@ typedef struct {
typedef struct ips_sglist {
uint32_t address;
uint32_t length;
} IPS_SG_LIST, *PIPS_SG_LIST;
} IPS_STD_SG_LIST;
typedef struct ips_enh_sglist {
uint32_t address_lo;
uint32_t address_hi;
uint32_t length;
uint32_t reserved;
} IPS_ENH_SG_LIST;
typedef union {
void *list;
IPS_STD_SG_LIST *std_list;
IPS_ENH_SG_LIST *enh_list;
} IPS_SG_LIST;
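/*
 * Illustrative sketch (not part of the patch): consumers resolve the
 * union per adapter, e.g. the way ips_fill_scb_sg_single() splits a
 * dma_addr_t ("busaddr" and "indx" assumed in scope):
 *
 *	if (IPS_USE_ENH_SGLIST(ha)) {
 *		scb->sg_list.enh_list[indx].address_lo =
 *			cpu_to_le32(pci_dma_lo32(busaddr));
 *		scb->sg_list.enh_list[indx].address_hi =
 *			cpu_to_le32(pci_dma_hi32(busaddr));
 *	} else {
 *		scb->sg_list.std_list[indx].address =
 *			cpu_to_le32(pci_dma_lo32(busaddr));
 *	}
 */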
typedef struct _IPS_INFOSTR {
char *buffer;
......@@ -1063,6 +1107,7 @@ typedef struct ips_ha {
char *ioctl_data; /* IOCTL data area */
uint32_t ioctl_datasize; /* IOCTL data size */
uint32_t cmd_in_progress; /* Current command in progress*/
int flags; /* */
uint8_t waitflag; /* are we waiting for cmd */
uint8_t active;
int ioctl_reset; /* IOCTL Requested Reset Flag */
......@@ -1110,7 +1155,7 @@ typedef struct ips_scb {
uint32_t sg_len;
uint32_t flags;
uint32_t op_code;
IPS_SG_LIST *sg_list;
IPS_SG_LIST sg_list;
Scsi_Cmnd *scsi_cmd;
struct ips_scb *q_next;
ips_scb_callback callback;
......