Commit e270e1b2 authored by James Bottomley

Merge titanic.il.steeleye.com:/home/jejb/BK/scsi-target-2.6

into titanic.il.steeleye.com:/home/jejb/BK/scsi-for-linus-2.6
parents 4b8cbbf6 c045ebb7
......@@ -438,13 +438,22 @@ static int cciss_open(struct inode *inode, struct file *filep)
/*
* Root is allowed to open raw volume zero even if it's not configured
* so array config can still work. I don't think I really like this,
* so array config can still work. Root is also allowed to open any
* volume that has a LUN ID, so it can issue IOCTL to reread the
* disk information. I don't think I really like this
* but I'm already using way too many device nodes to claim another one
* for "raw controller".
*/
if (drv->nr_blocks == 0) {
if (iminor(inode) != 0)
if (iminor(inode) != 0) { /* not node 0? */
/* if not node 0 make sure it is a partition = 0 */
if (iminor(inode) & 0x0f) {
return -ENXIO;
/* if it is, make sure we have a LUN ID */
} else if (drv->LunID == 0) {
return -ENXIO;
}
}
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
}
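
Because old and new lines are interleaved above, the resulting check is easier to read in isolation. Below is a minimal user-space model of what the reworked cciss_open() permits for an unconfigured (zero-length) volume. The helper name, the bool standing in for capable(CAP_SYS_ADMIN), and the sample minors are illustrative; the low-4-bit partition test is taken straight from the hunk.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the new cciss_open() policy for a volume with nr_blocks == 0.
 * Minor 0 is the raw-controller node; for other minors the low 4 bits
 * encode the partition number. Returns 0 or a negative errno. */
static int open_unconfigured(unsigned minor, unsigned lun_id, bool is_admin)
{
        if (minor != 0) {               /* not the controller node */
                if (minor & 0x0f)       /* partitions are never allowed */
                        return -ENXIO;
                else if (lun_id == 0)   /* whole disk must have a LUN ID */
                        return -ENXIO;
        }
        if (!is_admin)                  /* and only root may proceed */
                return -EPERM;
        return 0;
}

int main(void)
{
        printf("%d\n", open_unconfigured(16, 5, true));  /* 0: disk node with LUN ID */
        printf("%d\n", open_unconfigured(17, 5, true));  /* -ENXIO: partition node */
        printf("%d\n", open_unconfigured(0, 0, false));  /* -EPERM: not privileged */
        return 0;
}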
......@@ -1095,13 +1104,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
}
static int cciss_revalidate(struct gendisk *disk)
{
drive_info_struct *drv = disk->private_data;
set_capacity(disk, drv->nr_blocks);
return 0;
}
/*
* revalidate_allvol is for online array config utilities. After a
* utility reconfigures the drives in the array, it can use this function
......@@ -1153,7 +1155,9 @@ static int revalidate_allvol(ctlr_info_t *host)
for (i = 0; i < NWD; i++) {
struct gendisk *disk = host->gendisk[i];
drive_info_struct *drv = &(host->drv[i]);
if (!drv->nr_blocks)
/* we must register the controller even if no disks exist */
/* this is for the online array utilities */
if (!drv->heads && i)
continue;
blk_queue_hardsect_size(host->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
......@@ -1485,13 +1489,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
}
}
} else { /* Get geometry failed */
printk(KERN_WARNING "cciss: reading geometry failed, "
"continuing with default geometry\n");
drv->block_size = block_size;
drv->nr_blocks = total_size;
drv->heads = 255;
drv->sectors = 32; // Sectors per track
drv->cylinders = total_size / 255 / 32;
printk(KERN_WARNING "cciss: reading geometry failed\n");
}
printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
drv->heads, drv->sectors, drv->cylinders);
......@@ -1520,6 +1518,7 @@ cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
*total_size, *block_size);
return;
}
static int register_new_disk(ctlr_info_t *h)
{
struct gendisk *disk;
......@@ -1663,7 +1662,9 @@ static int register_new_disk(ctlr_info_t *h)
/* setup partitions per disk */
disk = h->gendisk[logvol];
set_capacity(disk, h->drv[logvol].nr_blocks);
add_disk(disk);
/* if it's the controller it's already added */
if(logvol)
add_disk(disk);
freeret:
kfree(ld_buff);
kfree(size_buff);
......@@ -1675,6 +1676,53 @@ static int register_new_disk(ctlr_info_t *h)
logvol = -1;
goto freeret;
}
static int cciss_revalidate(struct gendisk *disk)
{
ctlr_info_t *h = get_host(disk);
drive_info_struct *drv = get_drv(disk);
int logvol;
int FOUND=0;
unsigned int block_size;
unsigned int total_size;
ReadCapdata_struct *size_buff = NULL;
InquiryData_struct *inq_buff = NULL;
for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
{
if(h->drv[logvol].LunID == drv->LunID) {
FOUND=1;
break;
}
}
if (!FOUND) return 1;
size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
if (size_buff == NULL)
{
printk(KERN_WARNING "cciss: out of memory\n");
return 1;
}
inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
if (inq_buff == NULL)
{
printk(KERN_WARNING "cciss: out of memory\n");
kfree(size_buff);
return 1;
}
cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
blk_queue_hardsect_size(h->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
kfree(size_buff);
kfree(inq_buff);
return 0;
}
/*
* Wait polling for a command to complete.
* The memory mapped FIFO is polled for the completion.
......@@ -2762,7 +2810,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
disk->fops = &cciss_fops;
disk->queue = hba[i]->queue;
disk->private_data = drv;
if( !(drv->nr_blocks))
/* we must register the controller even if no disks exist */
/* this is for the online array utilities */
if(!drv->heads && j)
continue;
blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
......
......@@ -28,7 +28,9 @@
through the array controller. Note in particular, neither
physical nor logical disks are presented through the scsi layer. */
#include "../scsi/scsi.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <asm/atomic.h>
#include <linux/timer.h>
......@@ -61,15 +63,8 @@ int cciss_scsi_proc_info(
int length, /* length of data in buffer */
int func); /* 0 == read, 1 == write */
int cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *));
#if 0
int cciss_scsi_abort(Scsi_Cmnd *cmd);
#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
int cciss_scsi_reset(Scsi_Cmnd *cmd, unsigned int reset_flags);
#else
int cciss_scsi_reset(Scsi_Cmnd *cmd);
#endif
#endif
int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
void (* done)(struct scsi_cmnd *));
static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
{ .name = "cciss0", .ndevices = 0 },
......@@ -82,7 +77,7 @@ static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
{ .name = "cciss7", .ndevices = 0 },
};
static Scsi_Host_Template cciss_driver_template = {
static struct scsi_host_template cciss_driver_template = {
.module = THIS_MODULE,
.name = "cciss",
.proc_name = "cciss",
......@@ -552,7 +547,7 @@ cciss_scsi_setup(int cntl_num)
static void
complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
{
Scsi_Cmnd *cmd;
struct scsi_cmnd *cmd;
ctlr_info_t *ctlr;
u64bit addr64;
ErrorInfo_struct *ei;
......@@ -565,7 +560,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
return;
}
cmd = (Scsi_Cmnd *) cp->scsi_cmd;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
ctlr = hba[cp->ctlr];
/* undo the DMA mappings */
......@@ -573,14 +568,14 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
if (cmd->use_sg) {
pci_unmap_sg(ctlr->pdev,
cmd->buffer, cmd->use_sg,
scsi_to_pci_dma_dir(cmd->sc_data_direction));
cmd->sc_data_direction);
}
else if (cmd->request_bufflen) {
addr64.val32.lower = cp->SG[0].Addr.lower;
addr64.val32.upper = cp->SG[0].Addr.upper;
pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
cmd->request_bufflen,
scsi_to_pci_dma_dir(cmd->sc_data_direction));
cmd->sc_data_direction);
}
cmd->result = (DID_OK << 16); /* host byte */
......@@ -783,9 +778,8 @@ cciss_scsi_do_simple_cmd(ctlr_info_t *c,
cp->Request.Type.Direction = direction;
/* Fill in the SG list and do dma mapping */
cciss_map_one(c->pdev, cp,
(unsigned char *) buf, bufsize,
scsi_to_pci_dma_dir(SCSI_DATA_READ));
cciss_map_one(c->pdev, cp, (unsigned char *) buf,
bufsize, DMA_FROM_DEVICE);
cp->waiting = &wait;
......@@ -799,9 +793,7 @@ cciss_scsi_do_simple_cmd(ctlr_info_t *c,
wait_for_completion(&wait);
/* undo the dma mapping */
cciss_unmap_one(c->pdev, cp, bufsize,
scsi_to_pci_dma_dir(SCSI_DATA_READ));
cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
return(0);
}
......@@ -1180,14 +1172,14 @@ cciss_scsi_info(struct Scsi_Host *sa)
}
/* cciss_scatter_gather takes a Scsi_Cmnd, (cmd), and does the pci
/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
dma mapping and fills in the scatter gather entries of the
cciss command, cp. */
static void
cciss_scatter_gather(struct pci_dev *pdev,
CommandList_struct *cp,
Scsi_Cmnd *cmd)
struct scsi_cmnd *cmd)
{
unsigned int use_sg, nsegs=0, len;
struct scatterlist *scatter = (struct scatterlist *) cmd->buffer;
......@@ -1200,7 +1192,7 @@ cciss_scatter_gather(struct pci_dev *pdev,
addr64 = (__u64) pci_map_single(pdev,
cmd->request_buffer,
cmd->request_bufflen,
scsi_to_pci_dma_dir(cmd->sc_data_direction));
cmd->sc_data_direction);
cp->SG[0].Addr.lower =
(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
......@@ -1213,7 +1205,7 @@ cciss_scatter_gather(struct pci_dev *pdev,
else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg,
scsi_to_pci_dma_dir(cmd->sc_data_direction));
cmd->sc_data_direction);
for (nsegs=0; nsegs < use_sg; nsegs++) {
addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
......@@ -1234,7 +1226,7 @@ cciss_scatter_gather(struct pci_dev *pdev,
int
cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
{
ctlr_info_t **c;
int ctlr, rc;
......@@ -1302,11 +1294,10 @@ cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
cp->Request.Type.Attribute = ATTR_SIMPLE;
switch(cmd->sc_data_direction)
{
case SCSI_DATA_WRITE: cp->Request.Type.Direction = XFER_WRITE; break;
case SCSI_DATA_READ: cp->Request.Type.Direction = XFER_READ; break;
case SCSI_DATA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
case SCSI_DATA_UNKNOWN:
case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
case DMA_BIDIRECTIONAL:
// This can happen if a buggy application does a scsi passthru
// and sets both inlen and outlen to non-zero. ( see
// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
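
For reference, the direction constants swapped throughout the cciss_scsi hunks line up one-to-one. As far as I can tell from the headers of this era, the old SCSI_DATA_* names carried the same numeric values as the generic enum, which is why scsi_to_pci_dma_dir() is simply dropped rather than replaced; the sketch below paraphrases those definitions and is not part of the patch.

/* Old SCSI-layer names vs. the generic DMA API enum. */
enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,  /* was SCSI_DATA_UNKNOWN */
        DMA_TO_DEVICE     = 1,  /* was SCSI_DATA_WRITE   */
        DMA_FROM_DEVICE   = 2,  /* was SCSI_DATA_READ    */
        DMA_NONE          = 3,  /* was SCSI_DATA_NONE    */
};
/* With matching values, scsi_to_pci_dma_dir() was an identity cast, so
 * cmd->sc_data_direction can be handed to pci_map_sg()/pci_unmap_sg()
 * and friends directly. */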
......
......@@ -5270,7 +5270,6 @@ ahd_free(struct ahd_softc *ahd)
default:
case 5:
ahd_shutdown(ahd);
TAILQ_REMOVE(&ahd_tailq, ahd, links);
/* FALLTHROUGH */
case 4:
ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
......
......@@ -5032,7 +5032,6 @@ static void __exit
ahd_linux_exit(void)
{
struct ahd_softc *ahd;
u_long l;
/*
* Shutdown DV threads before going into the SCSI mid-layer.
......@@ -5040,12 +5039,11 @@ ahd_linux_exit(void)
* kernel so that waiting for our DV threads to exit leads
* to deadlock.
*/
ahd_list_lock(&l);
TAILQ_FOREACH(ahd, &ahd_tailq, links) {
ahd_linux_kill_dv_thread(ahd);
}
ahd_list_unlock(&l);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
* In 2.4 we have to unregister from the PCI core _after_
......
......@@ -105,12 +105,14 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
if (ahd != NULL) {
u_long s;
TAILQ_REMOVE(&ahd_tailq, ahd, links);
ahd_list_unlock(&l);
ahd_lock(ahd, &s);
ahd_intr_enable(ahd, FALSE);
ahd_unlock(ahd, &s);
ahd_free(ahd);
}
ahd_list_unlock(&l);
} else
ahd_list_unlock(&l);
}
static int
......
......@@ -3973,7 +3973,6 @@ ahc_free(struct ahc_softc *ahc)
default:
case 5:
ahc_shutdown(ahc);
TAILQ_REMOVE(&ahc_tailq, ahc, links);
/* FALLTHROUGH */
case 4:
ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
......
......@@ -5033,7 +5033,6 @@ static void
ahc_linux_exit(void)
{
struct ahc_softc *ahc;
u_long l;
/*
* Shutdown DV threads before going into the SCSI mid-layer.
......@@ -5041,12 +5040,10 @@ ahc_linux_exit(void)
* kernel so that waiting for our DV threads to exit leads
* to deadlock.
*/
ahc_list_lock(&l);
TAILQ_FOREACH(ahc, &ahc_tailq, links) {
ahc_linux_kill_dv_thread(ahc);
}
ahc_list_unlock(&l);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
......
......@@ -160,12 +160,14 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
if (ahc != NULL) {
u_long s;
TAILQ_REMOVE(&ahc_tailq, ahc, links);
ahc_list_unlock(&l);
ahc_lock(ahc, &s);
ahc_intr_enable(ahc, FALSE);
ahc_unlock(ahc, &s);
ahc_free(ahc);
}
ahc_list_unlock(&l);
} else
ahc_list_unlock(&l);
}
#endif /* !LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) */
......
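
The aic79xx and aic7xxx changes above are the same fix applied twice: TAILQ_REMOVE moves out of ahd_free()/ahc_free(), where it also ran on paths that never took the list lock, and into the PCI remove path, which now unlinks the softc under the lock, drops the lock, and only then tears the object down. A user-space sketch of that ordering, with pthreads and illustrative names standing in for the driver's list lock and softc:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Sketch of the remove-then-free ordering the hunks above establish. */
struct softc {
        TAILQ_ENTRY(softc) links;
        /* ... hardware state ... */
};

static TAILQ_HEAD(, softc) softc_tailq = TAILQ_HEAD_INITIALIZER(softc_tailq);
static pthread_mutex_t softc_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void softc_free(struct softc *sc)
{
        /* shut down the hardware, unload DMA maps, ... then: */
        free(sc);               /* no list manipulation in here anymore */
}

static void pci_dev_remove(struct softc *sc)
{
        pthread_mutex_lock(&softc_list_lock);
        TAILQ_REMOVE(&softc_tailq, sc, links);  /* unlink under the lock */
        pthread_mutex_unlock(&softc_list_lock); /* unlock before teardown */
        softc_free(sc);                         /* free with no lock held */
}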
......@@ -36,13 +36,12 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
uint16_t mb0, mb2;
uint32_t stat;
device_reg_t *reg;
uint16_t *dmp_reg;
device_reg_t __iomem *reg = ha->iobase;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2300_fw_dump *fw;
uint32_t dump_size, data_ram_cnt;
reg = ha->iobase;
risc_address = data_ram_cnt = 0;
mb0 = mb2 = 0;
flags = 0;
......@@ -91,85 +90,85 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = (uint16_t *)(reg + 0);
dmp_reg = (uint16_t __iomem *)(reg + 0);
for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x10);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
fw->risc_host_reg[cnt] = RD_REG_WORD(dmp_reg++);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x40);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x40);
for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x40);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->resp_dma_reg) / 2; cnt++)
fw->resp_dma_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x50);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x00);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0xA0);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2000);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2200);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2400);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2600);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2800);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2A00);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2C00);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2E00);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x10);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x20);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x30);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++);
......@@ -588,13 +587,11 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
uint32_t cnt, timer;
uint16_t risc_address;
uint16_t mb0, mb2;
device_reg_t *reg;
uint16_t *dmp_reg;
device_reg_t __iomem *reg = ha->iobase;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2100_fw_dump *fw;
reg = ha->iobase;
risc_address = 0;
mb0 = mb2 = 0;
flags = 0;
......@@ -634,79 +631,79 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
rval = QLA_FUNCTION_TIMEOUT;
}
if (rval == QLA_SUCCESS) {
dmp_reg = (uint16_t *)(reg + 0);
dmp_reg = (uint16_t __iomem *)(reg + 0);
for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x10);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (cnt == 8) {
dmp_reg = (uint16_t *)((uint8_t *)reg + 0xe0);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xe0);
}
fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++);
}
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x20);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20);
for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x00);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0xA0);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2000);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2100);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2200);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2300);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2400);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2500);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2600);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->pcr, 0x2700);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x10);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x20);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++);
WRT_REG_WORD(&reg->ctrl_status, 0x30);
dmp_reg = (uint16_t *)((uint8_t *)reg + 0x80);
dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++);
......@@ -987,9 +984,7 @@ qla_uprintf(char **uiter, char *fmt, ...)
void
qla2x00_dump_regs(scsi_qla_host_t *ha)
{
device_reg_t *reg;
reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
printk("Mailbox registers:\n");
printk("scsi(%ld): mbox 0 0x%04x \n",
......
......@@ -2126,11 +2126,9 @@ typedef struct scsi_qla_host {
spinlock_t hardware_lock ____cacheline_aligned;
device_reg_t *iobase; /* Base I/O address */
device_reg_t __iomem *iobase; /* Base I/O address */
unsigned long pio_address;
unsigned long pio_length;
void * mmio_address;
unsigned long mmio_length;
#define MIN_IOBASE_LEN 0x100
/* ISP ring lock, rings, and indexes */
......
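
The qla2xxx hunks here and below are one mechanical change: every pointer into the adapter's register window gains the __iomem annotation, so sparse can flag direct dereferences of MMIO and any mixing of I/O and ordinary pointers (hence the repeated (uint16_t __iomem *) casts in the fw_dump code, and the iptr/optr split in qla2x00_mailbox_command further down). A sketch of the definitions involved, paraphrased from the kernel's compiler headers of this era:

/* Under sparse (__CHECKER__), __iomem places a pointer in a separate
 * address space and marks it noderef: it may not be dereferenced
 * directly, only handed to accessors such as readw()/writew() or the
 * driver's RD_REG_WORD()/WRT_REG_WORD() wrappers. With an ordinary
 * compiler the annotation expands to nothing, so behavior is unchanged. */
#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))
#else
# define __iomem
#endif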
......@@ -385,7 +385,7 @@ static void
qla2x00_reset_chip(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
uint32_t cnt;
unsigned long mbx_flags = 0;
uint16_t cmd;
......@@ -539,7 +539,7 @@ static int
qla2x00_chip_diag(scsi_qla_host_t *ha)
{
int rval;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
unsigned long flags = 0;
uint16_t data;
uint32_t cnt;
......@@ -905,7 +905,7 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
int rval;
unsigned long flags = 0;
int cnt;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
spin_lock_irqsave(&ha->hardware_lock, flags);
......@@ -1192,7 +1192,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
init_cb_t *icb = ha->init_cb;
nvram_t *nv = (nvram_t *)ha->request_ring;
uint16_t *wptr = (uint16_t *)ha->request_ring;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
uint8_t timer_mode;
rval = QLA_SUCCESS;
......@@ -4271,7 +4271,7 @@ static int
qla2x00_restart_isp(scsi_qla_host_t *ha)
{
uint8_t status = 0;
device_reg_t *reg;
device_reg_t __iomem *reg = ha->iobase;
unsigned long flags = 0;
uint32_t wait_time;
......@@ -4356,7 +4356,7 @@ static void
qla2x00_reset_adapter(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
ha->flags.online = 0;
qla2x00_disable_intrs(ha);
......
......@@ -18,7 +18,7 @@
*/
static __inline__ uint16_t qla2x00_debounce_register(volatile uint16_t *);
static __inline__ uint16_t qla2x00_debounce_register(volatile uint16_t __iomem *);
/*
* qla2x00_debounce_register
* Debounce register.
......@@ -30,7 +30,7 @@ static __inline__ uint16_t qla2x00_debounce_register(volatile uint16_t *);
* register value.
*/
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t *addr)
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
volatile uint16_t first;
volatile uint16_t second;
......@@ -131,10 +131,9 @@ static inline void
qla2x00_enable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
device_reg_t *reg;
device_reg_t __iomem *reg = ha->iobase;
spin_lock_irqsave(&ha->hardware_lock, flags);
reg = ha->iobase;
ha->interrupts_on = 1;
/* enable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
......@@ -147,10 +146,9 @@ static inline void
qla2x00_disable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
device_reg_t *reg;
device_reg_t __iomem *reg = ha->iobase;
spin_lock_irqsave(&ha->hardware_lock, flags);
reg = ha->iobase;
ha->interrupts_on = 0;
/* disable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, 0);
......
......@@ -338,15 +338,15 @@ qla2x00_start_scsi(srb_t *sp)
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
device_reg_t *reg;
device_reg_t __iomem *reg;
char tag[2];
/* Setup device pointers. */
ret = 0;
fclun = sp->lun_queue->fclun;
ha = fclun->fcport->ha;
cmd = sp->cmd;
reg = ha->iobase;
cmd = sp->cmd;
/* Send marker if required */
if (ha->marker_needed != 0) {
......@@ -547,7 +547,7 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
request_t *pkt = NULL;
uint16_t cnt;
uint32_t *dword_ptr;
......@@ -616,7 +616,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
request_t *
qla2x00_ms_req_pkt(scsi_qla_host_t *ha, srb_t *sp)
{
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
request_t *pkt = NULL;
uint16_t cnt, i, index;
uint32_t *dword_ptr;
......@@ -706,7 +706,7 @@ qla2x00_ms_req_pkt(scsi_qla_host_t *ha, srb_t *sp)
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
DEBUG5(printk("%s(): IOCB data:\n", __func__));
DEBUG5(qla2x00_dump_buffer(
......
......@@ -43,7 +43,7 @@ irqreturn_t
qla2100_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
scsi_qla_host_t *ha;
device_reg_t *reg;
device_reg_t __iomem *reg;
int status;
unsigned long flags;
unsigned long iter;
......@@ -127,7 +127,7 @@ irqreturn_t
qla2300_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
scsi_qla_host_t *ha;
device_reg_t *reg;
device_reg_t __iomem *reg;
int status;
unsigned long flags;
unsigned long iter;
......@@ -235,17 +235,17 @@ static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
uint16_t cnt;
uint16_t *wptr;
device_reg_t *reg = ha->iobase;
uint16_t __iomem *wptr;
device_reg_t __iomem *reg = ha->iobase;
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
wptr = (uint16_t *)MAILBOX_REG(ha, reg, 1);
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
wptr = (uint16_t *)MAILBOX_REG(ha, reg, 8);
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
if (cnt == 4 || cnt == 5)
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
else
......@@ -277,7 +277,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint32_t mbx)
uint16_t handle_cnt;
uint16_t cnt;
uint32_t handles[5];
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
uint32_t rscn_entry, host_pid;
uint8_t rscn_queue_index;
......@@ -724,7 +724,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
sts_entry_t *pkt;
uint16_t handle_cnt;
uint16_t cnt;
......
......@@ -58,12 +58,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
{
int rval;
unsigned long flags = 0;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
struct timer_list tmp_intr_timer;
uint8_t abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
uint8_t io_lock_on = ha->flags.init_done;
uint16_t command;
uint16_t *iptr, *optr;
uint16_t *iptr;
uint16_t __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long mbx_flags = 0;
......@@ -101,7 +102,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
optr = (uint16_t *)MAILBOX_REG(ha, reg, 0);
optr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 0);
iptr = mcp->mb;
command = mcp->mb[0];
......@@ -109,7 +110,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
optr = (uint16_t *)MAILBOX_REG(ha, reg, 8);
optr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
if (mboxes & BIT_0)
WRT_REG_WORD(optr, *iptr);
......@@ -209,6 +210,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
/* Check whether we timed out */
if (ha->flags.mbox_int) {
uint16_t *iptr2;
DEBUG3_11(printk("qla2x00_mailbox_cmd: cmd %x completed.\n",
command);)
......@@ -223,15 +225,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
}
/* Load return mailbox registers. */
optr = mcp->mb;
iptr2 = mcp->mb;
iptr = (uint16_t *)&ha->mailbox_out[0];
mboxes = mcp->in_mb;
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
*optr = *iptr;
*iptr2 = *iptr;
mboxes >>= 1;
optr++;
iptr2++;
iptr++;
}
} else {
......
......@@ -845,11 +845,9 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
was_empty = add_to_pending_queue(ha, sp);
if ((IS_QLA2100(ha) || IS_QLA2200(ha)) && ha->flags.online) {
unsigned long flags;
device_reg_t *reg;
reg = ha->iobase;
if (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
qla2x00_process_response_queue(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
......@@ -1890,15 +1888,13 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
ha->pio_address = pio;
ha->pio_length = pio_len;
ha->mmio_address = ioremap(mmio, MIN_IOBASE_LEN);
if (!ha->mmio_address) {
ha->iobase = ioremap(mmio, MIN_IOBASE_LEN);
if (!ha->iobase) {
qla_printk(KERN_ERR, ha,
"cannot remap MMIO (%s), aborting\n", ha->pdev->slot_name);
goto iospace_error_exit;
}
ha->mmio_length = mmio_len;
ha->iobase = (device_reg_t *) ha->mmio_address;
return (0);
......@@ -1912,7 +1908,7 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
{
int ret;
device_reg_t *reg;
device_reg_t __iomem *reg;
struct Scsi_Host *host;
scsi_qla_host_t *ha;
unsigned long flags = 0;
......@@ -2225,14 +2221,11 @@ qla2x00_free_device(scsi_qla_host_t *ha)
free_irq(ha->pdev->irq, ha);
/* release io space registers */
if (ha->iobase)
iounmap(ha->iobase);
pci_release_regions(ha->pdev);
pci_disable_device(ha->pdev);
#if MEMORY_MAPPED_IO
if (ha->mmio_address)
iounmap(ha->mmio_address);
#endif
}
......
......@@ -374,10 +374,9 @@ static inline struct mbx_entry *
qla2x00_get_mbx_iocb_entry(scsi_qla_host_t *ha, uint32_t handle)
{
uint16_t cnt;
device_reg_t *reg;
device_reg_t __iomem *reg = ha->iobase;
struct mbx_entry *mbxentry;
reg = ha->iobase;
mbxentry = NULL;
if (ha->req_q_cnt < 3) {
......
......@@ -39,9 +39,7 @@ void
qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
{
uint16_t data;
device_reg_t *reg;
reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
data = RD_REG_WORD(&reg->nvram);
......@@ -73,9 +71,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
void
qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
{
device_reg_t *reg;
reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
......@@ -116,7 +112,7 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
int count;
uint16_t word;
uint32_t nv_cmd;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
......@@ -201,7 +197,7 @@ static uint16_t
qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
{
uint8_t cnt;
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
uint16_t data = 0;
uint16_t reg_data;
......@@ -243,7 +239,7 @@ qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
void
qla2x00_nv_deselect(scsi_qla_host_t *ha)
{
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
NVRAM_DELAY();
......@@ -258,7 +254,7 @@ qla2x00_nv_deselect(scsi_qla_host_t *ha)
void
qla2x00_nv_write(scsi_qla_host_t *ha, uint16_t data)
{
device_reg_t *reg = ha->iobase;
device_reg_t __iomem *reg = ha->iobase;
WRT_REG_WORD(&reg->nvram, data | NVR_SELECT);
NVRAM_DELAY();
......
......@@ -320,104 +320,113 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
{
struct scsi_device *sdev = sreq->sr_device; /* a bit ugly */
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int possible_inq_resp_len;
int count = 0;
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0;
int pass, count;
*bflags = 0;
repeat_inquiry:
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY to host %d"
" channel %d id %d lun %d\n", sdev->host->host_no,
sdev->channel, sdev->id, sdev->lun));
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = 36; /* issue conservative alloc_length */
sreq->sr_cmd_len = 0;
sreq->sr_data_direction = DMA_FROM_DEVICE;
memset(inq_result, 0, 36);
scsi_wait_req(sreq, (void *) scsi_cmd, (void *) inq_result, 36,
HZ/2 + HZ*scsi_inq_timeout, 3);
/* Perform up to 3 passes. The first pass uses a conservative
* transfer length of 36 unless sdev->inquiry_len specifies a
* different value. */
first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
try_inquiry_len = first_inquiry_len;
pass = 1;
next_pass:
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY pass %d "
"to host %d channel %d id %d lun %d, length %d\n",
pass, sdev->host->host_no, sdev->channel,
sdev->id, sdev->lun, try_inquiry_len));
/* Each pass gets up to three chances to ignore Unit Attention */
for (count = 0; count < 3; ++count) {
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) try_inquiry_len;
sreq->sr_cmd_len = 0;
sreq->sr_data_direction = DMA_FROM_DEVICE;
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: 1st INQUIRY %s with"
" code 0x%x\n", sreq->sr_result ?
"failed" : "successful", sreq->sr_result));
++count;
memset(inq_result, 0, try_inquiry_len);
scsi_wait_req(sreq, (void *) scsi_cmd, (void *) inq_result,
try_inquiry_len,
HZ/2 + HZ*scsi_inq_timeout, 3);
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
"with code 0x%x\n",
sreq->sr_result ? "failed" : "successful",
sreq->sr_result));
if (sreq->sr_result) {
if (sreq->sr_result) {
if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) != 0 &&
(sreq->sr_sense_buffer[2] & 0xf) == UNIT_ATTENTION &&
(sreq->sr_sense_buffer[12] == 0x28 ||
sreq->sr_sense_buffer[12] == 0x29) &&
sreq->sr_sense_buffer[13] == 0) {
/* not-ready to ready transition or power-on - good */
/* dpg: bogus? INQUIRY never returns UNIT_ATTENTION */
/* Supposedly, but many buggy devices do so anyway */
if (count < 3)
goto repeat_inquiry;
/* Supposedly, but many buggy devices do so anyway. */
if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
(sreq->sr_sense_buffer[2] & 0xf) ==
UNIT_ATTENTION &&
(sreq->sr_sense_buffer[12] == 0x28 ||
sreq->sr_sense_buffer[12] == 0x29) &&
sreq->sr_sense_buffer[13] == 0)
continue;
}
/*
* assume no peripheral if any other sort of error
*/
return;
break;
}
/*
* Get any flags for this device.
*
* XXX add a bflags to Scsi_Device, and replace the corresponding
* bit fields in Scsi_Device, so bflags need not be passed as an
* argument.
*/
*bflags |= scsi_get_device_flags(sdev, &inq_result[8], &inq_result[16]);
possible_inq_resp_len = (unsigned char) inq_result[4] + 5;
if (BLIST_INQUIRY_36 & *bflags)
possible_inq_resp_len = 36;
else if (BLIST_INQUIRY_58 & *bflags)
possible_inq_resp_len = 58;
else if (possible_inq_resp_len > 255)
possible_inq_resp_len = 36; /* sanity */
if (sreq->sr_result == 0) {
response_len = (unsigned char) inq_result[4] + 5;
if (response_len > 255)
response_len = first_inquiry_len; /* sanity */
if (possible_inq_resp_len > 36) { /* do additional INQUIRY */
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) possible_inq_resp_len;
sreq->sr_cmd_len = 0;
sreq->sr_data_direction = DMA_FROM_DEVICE;
/*
* re-zero inq_result just to be safe.
* Get any flags for this device.
*
* XXX add a bflags to Scsi_Device, and replace the
* corresponding bit fields in Scsi_Device, so bflags
* need not be passed as an argument.
*/
memset(inq_result, 0, possible_inq_resp_len);
scsi_wait_req(sreq, (void *) scsi_cmd,
(void *) inq_result,
possible_inq_resp_len, (1+scsi_inq_timeout)*(HZ/2), 3);
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: 2nd INQUIRY"
" %s with code 0x%x\n", sreq->sr_result ?
"failed" : "successful", sreq->sr_result));
if (sreq->sr_result) {
/* if the longer inquiry has failed, flag the device
* as only accepting 36 byte inquiries and retry the
* 36 byte inquiry */
printk(KERN_INFO "scsi scan: %d byte inquiry failed"
" with code %d. Consider BLIST_INQUIRY_36 for"
" this device\n", possible_inq_resp_len,
sreq->sr_result);
*bflags = BLIST_INQUIRY_36;
goto repeat_inquiry;
*bflags = scsi_get_device_flags(sdev, &inq_result[8],
&inq_result[16]);
/* When the first pass succeeds we gain information about
* what larger transfer lengths might work. */
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
else if (BLIST_INQUIRY_58 & *bflags)
next_inquiry_len = 58;
else if (sdev->inquiry_len)
next_inquiry_len = sdev->inquiry_len;
else
next_inquiry_len = response_len;
/* If more data is available perform the second pass */
if (next_inquiry_len > try_inquiry_len) {
try_inquiry_len = next_inquiry_len;
pass = 2;
goto next_pass;
}
}
/*
* The INQUIRY can change, this means the length can change.
*/
possible_inq_resp_len = (unsigned char) inq_result[4] + 5;
if (BLIST_INQUIRY_58 & *bflags)
possible_inq_resp_len = 58;
else if (possible_inq_resp_len > 255)
possible_inq_resp_len = 36; /* sanity */
} else if (pass == 2) {
printk(KERN_INFO "scsi scan: %d byte inquiry failed. "
"Consider BLIST_INQUIRY_36 for this device\n",
try_inquiry_len);
/* If this pass failed, the third pass goes back and transfers
* the same amount as we successfully got in the first pass. */
try_inquiry_len = first_inquiry_len;
pass = 3;
goto next_pass;
}
sdev->inquiry_len = possible_inq_resp_len;
/* If the last transfer attempt got an error, assume the
* peripheral doesn't exist or is dead. */
if (sreq->sr_result)
return;
/* Don't report any more data than the device says is valid */
sdev->inquiry_len = min(try_inquiry_len, response_len);
/*
* XXX Abort if the response length is less than 36? If less than
......
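
The scsi_probe_lun() rewrite above trades the goto-based retry for an explicit pass counter. Below is a compact user-space model of the resulting length negotiation; do_inquiry() is a hypothetical stand-in for scsi_wait_req(), and the per-pass Unit Attention retries and the BLIST_INQUIRY_36/58 overrides are omitted to keep the shape visible.

#include <stdbool.h>

/* Hypothetical stand-in for scsi_wait_req(): issues an INQUIRY asking
 * for len bytes, returns false on error, and on success stores the
 * device's claimed response length (byte 4 of the response plus 5). */
extern bool do_inquiry(int len, int *response_len);

/* Pass 1 asks for a conservative first_len (normally 36) bytes.
 * Pass 2 re-issues the command with whatever pass 1 said is available.
 * Pass 3 runs only if pass 2 failed, retreating to the pass-1 length. */
int probe_inquiry_len(int first_len)
{
        int try_len = first_len, response_len = 0;
        int pass = 1;

        for (;;) {
                bool ok = do_inquiry(try_len, &response_len);

                if (ok && pass == 1 && response_len > try_len) {
                        try_len = response_len;   /* more data available */
                        pass = 2;
                        continue;
                }
                if (!ok && pass == 2) {
                        try_len = first_len;      /* retreat to safety */
                        pass = 3;
                        continue;
                }
                if (!ok)
                        return -1;                /* assume no device */
                /* never report more than the device says is valid */
                return try_len < response_len ? try_len : response_len;
        }
}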