Commit c8b6de16 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (48 commits)
  [SCSI] aacraid: do not set valid bit in sense information
  [SCSI] ses: add new Enclosure ULD
  [SCSI] enclosure: add support for enclosure services
  [SCSI] sr: fix test unit ready responses
  [SCSI] u14-34f: fix data direction bug
  [SCSI] aacraid: pci_set_dma_max_seg_size opened up for late model controllers
  [SCSI] fix BUG when sum(scatterlist) > bufflen
  [SCSI] arcmsr: updates (1.20.00.15)
  [SCSI] advansys: make 3 functions static
  [SCSI] Small cleanups for scsi_host.h
  [SCSI] dc395x: fix uninitialized var warning
  [SCSI] NCR53C9x: remove driver
  [SCSI] remove m68k NCR53C9x based drivers
  [SCSI] dec_esp: Remove driver
  [SCSI] kernel-doc: fix scsi docbook
  [SCSI] update my email address
  [SCSI] add protocol definitions
  [SCSI] sd: handle bad lba in sense information
  [SCSI] qla2xxx: Update version number to 8.02.00-k8.
  [SCSI] qla2xxx: Correct issue where incorrect init-fw mailbox command was used on non-NPIV capable ISPs.
  ...
parents a6a852e9 8e31e607
......@@ -12,7 +12,7 @@
<surname>Bottomley</surname>
<affiliation>
<address>
<email>James.Bottomley@steeleye.com</email>
<email>James.Bottomley@hansenpartnership.com</email>
</address>
</affiliation>
</author>
......
......@@ -68,4 +68,45 @@
** 2. modify the arcmsr_pci_slot_reset function
** 3. modify the arcmsr_pci_ers_disconnect_forepart function
** 4. modify the arcmsr_pci_ers_need_reset_forepart function
** 1.20.00.15 09/27/2007 Erich Chen & Nick Cheng
** 1. add arcmsr_enable_eoi_mode() on adapter Type B
** 2. add readl(reg->iop2drv_doorbell_reg) in arcmsr_handle_hbb_isr()
**                      in case the doorbell interrupt clearance is cached
** 1.20.00.15 10/01/2007 Erich Chen & Nick Cheng
** 1. modify acb->devstate[i][j]
** as ARECA_RAID_GOOD instead of
** ARECA_RAID_GONE in arcmsr_alloc_ccb_pool
** 1.20.00.15 11/06/2007 Erich Chen & Nick Cheng
** 1. add conditional declaration for
** arcmsr_pci_error_detected() and
** arcmsr_pci_slot_reset
** 1.20.00.15 11/23/2007 Erich Chen & Nick Cheng
**                      1. check if the sg list member number
**                      exceeds arcmsr default limit in arcmsr_build_ccb()
**                      2. change the returned value type of arcmsr_build_ccb()
**                      from "void" to "int"
**                      3. add the conditional check if arcmsr_build_ccb()
**                      returns FAILED
** 1.20.00.15 12/04/2007 Erich Chen & Nick Cheng
**                      1. modify arcmsr_drain_donequeue() to ignore unknown
**                      commands and let the kernel handle the command timeout.
**                      This copes with IO requests that violate the max. segment
**                      limit, as seen with Linux XFS over DM-CRYPT.
** Thanks to Milan Broz's comments <mbroz@redhat.com>
** 1.20.00.15 12/24/2007 Erich Chen & Nick Cheng
**                      1. fix the portability problems
**                      2. fix type B where we should _not_ iounmap() acb->pmu;
**                      it's not ioremapped.
**                      3. add return -ENOMEM if ioremap() fails
**                      4. transfer IS_SG64_ADDR w/ cpu_to_le32()
**                      in arcmsr_build_ccb
**                      5. modify acb->devstate[i][j] as ARECA_RAID_GONE instead of
**                      ARECA_RAID_GOOD in arcmsr_alloc_ccb_pool()
**                      6. fix arcmsr_cdb->Context as (unsigned long)arcmsr_cdb
**                      7. add the checking state of
**                      (outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT) == 0
**                      in arcmsr_handle_hba_isr
**                      8. replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer()
**                      9. fix the release of dma memory for type B in arcmsr_free_ccb_pool()
**                      10. fix the arcmsr_polling_hbb_ccbdone()
**************************************************************************
......@@ -1407,7 +1407,7 @@ Credits
=======
The following people have contributed to this document:
Mike Anderson <andmike at us dot ibm dot com>
James Bottomley <James dot Bottomley at steeleye dot com>
James Bottomley <James dot Bottomley at hansenpartnership dot com>
Patrick Mansfield <patmans at us dot ibm dot com>
Christoph Hellwig <hch at infradead dot org>
Doug Ledford <dledford at redhat dot com>
......
......@@ -285,4 +285,13 @@ config INTEL_MENLOW
If unsure, say N.
config ENCLOSURE_SERVICES
tristate "Enclosure Services"
default n
help
Provides support for intelligent enclosures (bays which
contain storage devices). You also need either a host
driver (SCSI/ATA) which supports enclosures
or a SCSI enclosure device (SES) to use these services.
endif # MISC_DEVICES
......@@ -20,3 +20,4 @@ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
......@@ -179,7 +179,15 @@ config CHR_DEV_SCH
say M here and read <file:Documentation/kbuild/modules.txt> and
<file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
If unsure, say N.
config SCSI_ENCLOSURE
tristate "SCSI Enclosure Support"
depends on SCSI && ENCLOSURE_SERVICES
help
Enclosures are devices sitting on or in SCSI backplanes that
manage devices. If you have a disk cage, the chances are that
it has an enclosure device. Selecting this option will just allow
certain enclosure conditions to be reported and is not required.
comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
depends on SCSI
......@@ -350,17 +358,6 @@ config SGIWD93_SCSI
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.
config SCSI_DECNCR
tristate "DEC NCR53C94 Scsi Driver"
depends on MACH_DECSTATION && SCSI && TC
help
Say Y here to support the NCR53C94 SCSI controller chips on IOASIC
based TURBOchannel DECstations and TURBOchannel PMAZ-A cards.
config SCSI_DECSII
tristate "DEC SII Scsi Driver"
depends on MACH_DECSTATION && SCSI && 32BIT
config BLK_DEV_3W_XXXX_RAID
tristate "3ware 5/6/7/8xxx ATA-RAID support"
depends on PCI && SCSI
......@@ -1263,17 +1260,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
not allow targets to disconnect is not reasonable if there is more
than 1 device on a SCSI bus. The normal answer therefore is N.
config SCSI_MCA_53C9X
tristate "NCR MCA 53C9x SCSI support"
depends on MCA_LEGACY && SCSI && BROKEN_ON_SMP
help
Some MicroChannel machines, notably the NCR 35xx line, use a SCSI
controller based on the NCR 53C94. This driver will allow use of
the controller on the 3550, and very possibly others.
To compile this driver as a module, choose M here: the
module will be called mca_53c9x.
config SCSI_PAS16
tristate "PAS16 SCSI support"
depends on ISA && SCSI
......@@ -1600,45 +1586,6 @@ config GVP11_SCSI
To compile this driver as a module, choose M here: the
module will be called gvp11.
config CYBERSTORM_SCSI
tristate "CyberStorm SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga with an original (MkI) Phase5 Cyberstorm
accelerator board and the optional Cyberstorm SCSI controller,
answer Y. Otherwise, say N.
config CYBERSTORMII_SCSI
tristate "CyberStorm Mk II SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga with a Phase5 Cyberstorm MkII accelerator board
and the optional Cyberstorm SCSI controller, say Y. Otherwise,
answer N.
config BLZ2060_SCSI
tristate "Blizzard 2060 SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga with a Phase5 Blizzard 2060 accelerator board
and want to use the onboard SCSI controller, say Y. Otherwise,
answer N.
config BLZ1230_SCSI
tristate "Blizzard 1230IV/1260 SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga 1200 with a Phase5 Blizzard 1230IV or Blizzard
1260 accelerator, and the optional SCSI module, say Y. Otherwise,
say N.
config FASTLANE_SCSI
tristate "Fastlane SCSI support"
depends on ZORRO && SCSI
help
If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
one in the near future, say Y to this question. Otherwise, say N.
config SCSI_A4000T
tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
depends on AMIGA && SCSI && EXPERIMENTAL
......@@ -1666,15 +1613,6 @@ config SCSI_ZORRO7XX
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
config OKTAGON_SCSI
tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
depends on ZORRO && SCSI && EXPERIMENTAL
help
If you have the BSC Oktagon SCSI disk controller for the Amiga, say
Y to this question. If you're in doubt about whether you have one,
see the picture at
<http://amiga.resource.cx/exp/search.pl?product=oktagon>.
config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
......@@ -1727,18 +1665,6 @@ config MAC_SCSI
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes. If you have one of these say Y and read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
To compile this driver as a module, choose M here: the
module will be called mac_esp.
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
depends on MVME147 && SCSI=y
......@@ -1779,6 +1705,7 @@ config SUN3_SCSI
config SUN3X_ESP
bool "Sun3x ESP SCSI"
depends on SUN3X && SCSI=y
select SCSI_SPI_ATTRS
help
The ESP was an on-board SCSI controller used on Sun 3/80
machines. Say Y here to compile in support for it.
......
......@@ -44,15 +44,8 @@ obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
obj-$(CONFIG_CYBERSTORM_SCSI) += NCR53C9x.o cyberstorm.o
obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o
obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o
obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o
obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o
obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
......@@ -95,7 +88,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
obj-$(CONFIG_SCSI_MCA_53C9X) += NCR53C9x.o mca_53c9x.o
obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o
obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
......@@ -112,13 +104,12 @@ obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_PPA) += ppa.o
obj-$(CONFIG_SCSI_IMM) += imm.o
obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
......@@ -138,6 +129,7 @@ obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
obj-$(CONFIG_CHR_DEV_SG) += sg.o
obj-$(CONFIG_CHR_DEV_SCH) += ch.o
obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
......
......@@ -859,44 +859,31 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
u8 a_sense_code, u8 incorrect_length,
u8 bit_pointer, u16 field_pointer,
u32 residue)
static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
{
sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
u8 *sense_buf = (u8 *)sense_data;
/* Sense data valid, err code 70h */
sense_buf[0] = 0x70; /* No info field */
sense_buf[1] = 0; /* Segment number, always zero */
if (incorrect_length) {
sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
sense_buf[3] = BYTE3(residue);
sense_buf[4] = BYTE2(residue);
sense_buf[5] = BYTE1(residue);
sense_buf[6] = BYTE0(residue);
} else
sense_buf[2] = sense_key; /* Sense key */
if (sense_key == ILLEGAL_REQUEST)
sense_buf[7] = 10; /* Additional sense length */
else
sense_buf[7] = 6; /* Additional sense length */
sense_buf[2] = sense_key; /* Sense key */
sense_buf[12] = sense_code; /* Additional sense code */
sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
if (sense_key == ILLEGAL_REQUEST) {
sense_buf[15] = 0;
sense_buf[7] = 10; /* Additional sense length */
if (sense_code == SENCODE_INVALID_PARAM_FIELD)
sense_buf[15] = 0x80;/* Std sense key specific field */
sense_buf[15] = bit_pointer;
/* Illegal parameter is in the parameter block */
if (sense_code == SENCODE_INVALID_CDB_FIELD)
sense_buf[15] = 0xc0;/* Std sense key specific field */
sense_buf[15] |= 0xc0;/* Std sense key specific field */
/* Illegal parameter is in the CDB block */
sense_buf[15] |= bit_pointer;
sense_buf[16] = field_pointer >> 8; /* MSB */
sense_buf[17] = field_pointer; /* LSB */
}
} else
sense_buf[7] = 6; /* Additional sense length */
}
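For reference, a call with the reworked signature (taken from later in this patch) now looks like this; note that byte 0 of the resulting fixed-format sense data is 0x70 rather than 0xF0, i.e. the VALID bit (0x80) is no longer set, which is what the "do not set valid bit" commit subject refers to:

	/* invalid field in CDB: bit pointer 7, field pointer 2 */
	set_sense(&dev->fsa_dev[cid].sense_data,
		  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
		  ASENCODE_NO_SENSE, 7, 2);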
static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
......@@ -906,11 +893,9 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
......@@ -1520,11 +1505,9 @@ static void io_callback(void *context, struct fib * fibptr)
le32_to_cpu(readreply->status));
#endif
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
......@@ -1733,11 +1716,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
le32_to_cpu(synchronizereply->status));
cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
......@@ -1945,10 +1926,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST,
SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
......@@ -1995,10 +1975,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST,
SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 0, 7, 2, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 7, 2);
memcpy(scsicmd->sense_buffer,
&dev->fsa_dev[cid].sense_data,
min_t(size_t,
......@@ -2254,9 +2233,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t,
sizeof(dev->fsa_dev[cid].sense_data),
......
......@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
spin_lock_irqsave(&dev->fib_lock, flags);
entry = dev->fib_list.next;
fibctx = NULL;
......@@ -251,24 +252,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the AdapterFibContext from the Input parameters.
*/
if (fibctx->unique == f.fibctx) { /* We found a winner */
if (fibctx->unique == f.fibctx) { /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
if (!fibctx) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context not found\n"));
return -EINVAL;
}
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context))) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
return -EINVAL;
}
status = 0;
spin_lock_irqsave(&dev->fib_lock, flags);
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
......@@ -414,8 +416,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
* @arg: ioctl arguments
*
* This routine returns the driver version.
* Under Linux, there have been no version incompatibilities, so this is
* simple!
* Under Linux, there have been no version incompatibilities, so this is
* simple!
*/
static int check_revision(struct aac_dev *dev, void __user *arg)
......@@ -463,7 +465,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
u32 sg_indx = 0;
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
......@@ -517,11 +519,11 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
......@@ -786,9 +788,9 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
return -EFAULT;
if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
return -EFAULT;
}
return 0;
}
......
......@@ -1130,31 +1130,29 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (error < 0)
goto out_deinit;
if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) {
error = pci_set_dma_max_seg_size(pdev, 65536);
if (error)
goto out_deinit;
}
/*
* Lets override negotiations and drop the maximum SG limit to 34
*/
if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
(aac->scsi_host_ptr->sg_tablesize > 34)) {
aac->scsi_host_ptr->sg_tablesize = 34;
aac->scsi_host_ptr->max_sectors
= (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
(shost->sg_tablesize > 34)) {
shost->sg_tablesize = 34;
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
(aac->scsi_host_ptr->sg_tablesize > 17)) {
aac->scsi_host_ptr->sg_tablesize = 17;
aac->scsi_host_ptr->max_sectors
= (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
(shost->sg_tablesize > 17)) {
shost->sg_tablesize = 17;
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
error = pci_set_dma_max_seg_size(pdev,
(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
(shost->max_sectors << 9) : 65536);
if (error)
goto out_deinit;
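	/*
	 * Worked example (illustration only, not part of the patch): with the
	 * AAC_QUIRK_34SG cap, sg_tablesize = 34 gives max_sectors = 34 * 8 + 112
	 * = 384, so on an AAC_OPT_NEW_COMM adapter the segment limit set just
	 * above becomes 384 << 9 = 196608 bytes; older adapters stay at 65536.
	 */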
/*
* Firware printf works only with older firmware.
* Firmware printf works only with older firmware.
*/
if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
aac->printf_enabled = 1;
......
......@@ -12261,7 +12261,7 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
/*
* Write the EEPROM from 'cfg_buf'.
*/
void __devinit
static void __devinit
AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
ushort *wbuf;
......@@ -12328,7 +12328,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
void __devinit
static void __devinit
AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort *wbuf;
......@@ -12395,7 +12395,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
void __devinit
static void __devinit
AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort *wbuf;
......
......@@ -48,7 +48,7 @@ struct class_device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 320
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/08/30"
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
......@@ -248,6 +248,7 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
......@@ -256,6 +257,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
......
......@@ -315,9 +315,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
}
reg = (struct MessageUnit_B *)(dma_coherent +
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
dma_addr = dma_coherent_handle;
ccb_tmp = (struct CommandControlBlock *)dma_coherent;
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
......@@ -371,8 +368,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
out:
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20,
acb->dma_coherent, acb->dma_coherent_handle);
(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
return -ENOMEM;
}
......@@ -509,6 +506,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
, reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
return 0x00;
}
msleep(10);
......@@ -748,6 +746,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
, ccb->startdone
, atomic_read(&acb->ccboutstandingcount));
}
else
arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
......@@ -886,7 +885,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
}
}
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
......@@ -906,6 +905,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
nseg = scsi_dma_map(pcmd);
if (nseg > ARCMSR_MAX_SG_ENTRIES)
return FAILED;
BUG_ON(nseg < 0);
if (nseg) {
......@@ -946,6 +947,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
ccb->ccb_flags |= CCB_FLAG_WRITE;
}
return SUCCESS;
}
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
......@@ -1036,18 +1038,22 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
iounmap(acb->pmuA);
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
acb->dma_coherent_handle);
break;
}
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
dma_free_coherent(&acb->pdev->dev,
(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
}
}
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
acb->dma_coherent_handle);
}
void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
......@@ -1273,7 +1279,9 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
return 1;
writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
	/* in case the last doorbell interrupt clearance is still cached, this read pushes the hardware to commit the clear bit */
readl(reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
......@@ -1380,12 +1388,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
case ARCMSR_MESSAGE_READ_RQBUFFER: {
unsigned long *ver_addr;
dma_addr_t buf_handle;
uint8_t *pQbuffer, *ptmpQbuffer;
int32_t allxfer_len = 0;
void *tmp;
ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
if (!ver_addr) {
tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
ver_addr = (unsigned long *)tmp;
if (!tmp) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
......@@ -1421,18 +1430,19 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
pcmdmessagefld->cmdmessage.Length = allxfer_len;
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
kfree(tmp);
}
break;
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned long *ver_addr;
dma_addr_t buf_handle;
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer, *ptmpuserbuffer;
void *tmp;
ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
if (!ver_addr) {
tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
ver_addr = (unsigned long *)tmp;
if (!tmp) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
......@@ -1482,7 +1492,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
retvalue = ARCMSR_MESSAGE_FAIL;
}
}
pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
kfree(tmp);
}
break;
......@@ -1682,8 +1692,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
ccb = arcmsr_get_freeccb(acb);
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
arcmsr_build_ccb(acb, ccb, cmd);
	if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
cmd->scsi_done(cmd);
return 0;
}
arcmsr_post_ccb(acb, ccb);
return 0;
}
......@@ -1844,7 +1857,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
}
}
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_B *reg = acb->pmuB;
......@@ -1878,7 +1891,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
poll_ccb_done = (ccb == poll_ccb) ? 1:0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if (ccb->startdone == ARCMSR_CCB_ABORTED) {
if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
printk(KERN_NOTICE "arcmsr%d: \
scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
,acb->host->host_no
......@@ -1901,7 +1914,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
} /*drain reply FIFO*/
}
static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \
static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
switch (acb->adapter_type) {
......@@ -2026,6 +2039,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
do {
firmware_state = readl(reg->iop2drv_doorbell_reg);
} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
}
break;
}
......@@ -2090,19 +2104,39 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
return;
case ACB_ADAPTER_TYPE_B:
{
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
if(arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
return;
}
}
break;
}
return;
}
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
arcmsr_wait_firmware_ready(acb);
arcmsr_iop_confirm(acb);
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_wait_firmware_ready(acb);
arcmsr_iop_confirm(acb);
arcmsr_get_firmware_spec(acb);
/*start background rebuild*/
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
arcmsr_clear_doorbell_queue_buffer(acb);
arcmsr_enable_eoi_mode(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
acb->acb_flags |= ACB_F_IOP_INITED;
......@@ -2275,6 +2309,7 @@ static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
arcmsr_clear_doorbell_queue_buffer(acb);
arcmsr_enable_eoi_mode(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
acb->acb_flags |= ACB_F_IOP_INITED;
......
......@@ -1790,7 +1790,7 @@ int acornscsi_starttransfer(AS_Host *host)
return 0;
}
residual = host->SCpnt->request_bufflen - host->scsi.SCp.scsi_xferred;
residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
sbic_arm_writenext(host->scsi.io_port, residual >> 16);
......@@ -2270,7 +2270,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4b: /* -> PHASE_STATUSIN */
case 0x8b: /* -> PHASE_STATUSIN */
/* DATA IN -> STATUS */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_readstatusbyte(host);
......@@ -2281,7 +2281,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4e: /* -> PHASE_MSGOUT */
case 0x8e: /* -> PHASE_MSGOUT */
/* DATA IN -> MESSAGE OUT */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_sendmessage(host);
......@@ -2291,7 +2291,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4f: /* message in */
case 0x8f: /* message in */
/* DATA IN -> MESSAGE IN */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
......@@ -2319,7 +2319,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4b: /* -> PHASE_STATUSIN */
case 0x8b: /* -> PHASE_STATUSIN */
/* DATA OUT -> STATUS */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
......@@ -2331,7 +2331,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4e: /* -> PHASE_MSGOUT */
case 0x8e: /* -> PHASE_MSGOUT */
/* DATA OUT -> MESSAGE OUT */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
......@@ -2342,7 +2342,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4f: /* message in */
case 0x8f: /* message in */
/* DATA OUT -> MESSAGE IN */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
......
......@@ -18,17 +18,32 @@
* The scatter-gather list handling. This contains all
* the yucky stuff that needs to be fixed properly.
*/
/*
* copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
* entries of uninitialized memory. SCp is from scsi-ml and has a valid
* (possibly chained) sg-list
*/
static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
{
int bufs = SCp->buffers_residual;
	/* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg()
	 * and to remove this BUG_ON. Use min() in its place.
*/
BUG_ON(bufs + 1 > max);
sg_set_buf(sg, SCp->ptr, SCp->this_residual);
if (bufs)
memcpy(sg + 1, SCp->buffer + 1,
sizeof(struct scatterlist) * bufs);
if (bufs) {
struct scatterlist *src_sg;
unsigned i;
for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
*(++sg) = *src_sg;
sg_mark_end(sg);
}
return bufs + 1;
}
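A hypothetical caller (names invented for illustration; not part of this patch) might use the helper to flatten the command's current scsi_pointer into a bounded local list before programming DMA:

	static int example_map_for_dma(struct scsi_cmnd *SCpnt)
	{
		struct scatterlist local_sg[16];	/* illustrative bound only */
		int nents;

		/* SCpnt->SCp must already be set up, e.g. by init_SCp() below */
		nents = copy_SCp_to_sg(local_sg, &SCpnt->SCp, ARRAY_SIZE(local_sg));

		/* ... hand local_sg[0..nents-1] to the DMA engine ... */
		return nents;
	}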
......@@ -36,7 +51,7 @@ static inline int next_SCp(struct scsi_pointer *SCp)
{
int ret = SCp->buffers_residual;
if (ret) {
SCp->buffer++;
SCp->buffer = sg_next(SCp->buffer);
SCp->buffers_residual--;
SCp->ptr = sg_virt(SCp->buffer);
SCp->this_residual = SCp->buffer->length;
......@@ -68,46 +83,46 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
{
memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
if (SCpnt->use_sg) {
if (scsi_bufflen(SCpnt)) {
unsigned long len = 0;
int buf;
SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
SCpnt->SCp.buffer = scsi_sglist(SCpnt);
SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
SCpnt->SCp.phase = SCpnt->request_bufflen;
SCpnt->SCp.phase = scsi_bufflen(SCpnt);
#ifdef BELT_AND_BRACES
/*
* Calculate correct buffer length. Some commands
* come in with the wrong request_bufflen.
*/
for (buf = 0; buf <= SCpnt->SCp.buffers_residual; buf++)
len += SCpnt->SCp.buffer[buf].length;
if (SCpnt->request_bufflen != len)
printk(KERN_WARNING "scsi%d.%c: bad request buffer "
"length %d, should be %ld\n", SCpnt->device->host->host_no,
'0' + SCpnt->device->id, SCpnt->request_bufflen, len);
SCpnt->request_bufflen = len;
{ /*
* Calculate correct buffer length. Some commands
* come in with the wrong scsi_bufflen.
*/
struct scatterlist *sg;
unsigned i, sg_count = scsi_sg_count(SCpnt);
scsi_for_each_sg(SCpnt, sg, sg_count, i)
len += sg->length;
if (scsi_bufflen(SCpnt) != len) {
printk(KERN_WARNING
"scsi%d.%c: bad request buffer "
"length %d, should be %ld\n",
SCpnt->device->host->host_no,
'0' + SCpnt->device->id,
scsi_bufflen(SCpnt), len);
/*
					 * FIXME: Totally naive fixup. We should abort
					 * with an error.
*/
SCpnt->SCp.phase =
min_t(unsigned long, len,
scsi_bufflen(SCpnt));
}
}
#endif
} else {
SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
SCpnt->SCp.this_residual = SCpnt->request_bufflen;
SCpnt->SCp.phase = SCpnt->request_bufflen;
}
/*
* If the upper SCSI layers pass a buffer, but zero length,
* we aren't interested in the buffer pointer.
*/
if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.ptr) {
#if 0 //def BELT_AND_BRACES
printk(KERN_WARNING "scsi%d.%c: zero length buffer passed for "
"command ", SCpnt->host->host_no, '0' + SCpnt->target);
__scsi_print_command(SCpnt->cmnd);
#endif
SCpnt->SCp.ptr = NULL;
SCpnt->SCp.this_residual = 0;
SCpnt->SCp.phase = 0;
}
}
/* blz1230.c: Driver for Blizzard 1230 SCSI IV Controller.
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on the CyberStorm driver, hence the occasional
* reference to CyberStorm.
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
 * 2) Make as few routines as possible required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
#define MKIV 1
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define BLZ1230_ESP_ADDR 0x8000
#define BLZ1230_DMA_ADDR 0x10000
#define BLZ1230II_ESP_ADDR 0x10000
#define BLZ1230II_DMA_ADDR 0x10021
/* The Blizzard 1230 DMA interface
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Only two things can be programmed in the Blizzard DMA:
* 1) The data direction is controlled by the status of bit 31 (1 = write)
* 2) The source/dest address (word aligned, shifted one right) in bits 30-0
*
* Program DMA by first latching the highest byte of the address/direction
* (i.e. bits 31-24 of the long word constructed as described in steps 1+2
* above). Then write each byte of the address/direction (starting with the
* top byte, working down) to the DMA address register.
*
* Figure out interrupt status by reading the ESP status byte.
*/
struct blz1230_dma_registers {
volatile unsigned char dma_addr; /* DMA address [0x0000] */
unsigned char dmapad2[0x7fff];
volatile unsigned char dma_latch; /* DMA latch [0x8000] */
};
struct blz1230II_dma_registers {
volatile unsigned char dma_addr; /* DMA address [0x0000] */
unsigned char dmapad2[0xf];
volatile unsigned char dma_latch; /* DMA latch [0x0010] */
};
#define BLZ1230_DMA_WRITE 0x80000000
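A condensed sketch of the latch-then-pump sequence described in the comment above (it mirrors dma_init_read()/dma_init_write() further down, minus the cache maintenance; MKIV register layout assumed):

	static inline void blz1230_dma_program_sketch(struct blz1230_dma_registers *dregs,
						      __u32 addr, int write)
	{
		addr >>= 1;				/* word aligned, shifted one right */
		if (write)
			addr |= BLZ1230_DMA_WRITE;	/* bit 31: 1 = write */
		else
			addr &= ~BLZ1230_DMA_WRITE;

		dregs->dma_latch = (addr >> 24) & 0xff;	/* latch the top byte first */
		dregs->dma_addr  = (addr >> 24) & 0xff;	/* then pump each byte, */
		dregs->dma_addr  = (addr >> 16) & 0xff;	/* working down from the top */
		dregs->dma_addr  = (addr >>  8) & 0xff;
		dregs->dma_addr  = (addr      ) & 0xff;
	}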
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/***************************************************************** Detection */
int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
{
struct NCR_ESP *esp;
struct zorro_dev *z = NULL;
unsigned long address;
struct ESP_regs *eregs;
unsigned long board;
#if MKIV
#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260
#define REAL_BLZ1230_ESP_ADDR BLZ1230_ESP_ADDR
#define REAL_BLZ1230_DMA_ADDR BLZ1230_DMA_ADDR
#else
#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060
#define REAL_BLZ1230_ESP_ADDR BLZ1230II_ESP_ADDR
#define REAL_BLZ1230_DMA_ADDR BLZ1230II_DMA_ADDR
#endif
if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) {
board = z->resource.start;
if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR,
sizeof(struct ESP_regs), "NCR53C9x")) {
/* Do some magic to figure out if the blizzard is
* equipped with a SCSI controller
*/
address = ZTWO_VADDR(board);
eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR,
0);
esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
udelay(5);
if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
goto err_out;
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
esp->dma_dump_state = &dma_dump_state;
esp->dma_init_read = &dma_init_read;
esp->dma_init_write = &dma_init_write;
esp->dma_ints_off = &dma_ints_off;
esp->dma_ints_on = &dma_ints_on;
esp->dma_irq_p = &dma_irq_p;
esp->dma_ports_p = &dma_ports_p;
esp->dma_setup = &dma_setup;
/* Optional functions */
esp->dma_barrier = 0;
esp->dma_drain = 0;
esp->dma_invalidate = 0;
esp->dma_irq_entry = 0;
esp->dma_irq_exit = 0;
esp->dma_led_on = 0;
esp->dma_led_off = 0;
esp->dma_poll = 0;
esp->dma_reset = 0;
/* SCSI chip speed */
esp->cfreq = 40000000;
/* The DMA registers on the Blizzard are mapped
* relative to the device (i.e. in the same Zorro
* I/O block).
*/
esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR);
/* ESP register base */
esp->eregs = eregs;
/* Set the command buffer */
esp->esp_command = cmd_buffer;
esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+REAL_BLZ1230_ESP_ADDR;
if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
"Blizzard 1230 SCSI IV", esp->ehost))
goto err_out;
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
/* We don't have a differential SCSI-bus. */
esp->diff = 0;
esp_initialize(esp);
printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
esps_running = esps_in_use;
return esps_in_use;
}
}
return 0;
err_out:
scsi_unregister(esp->ehost);
esp_deallocate(esp);
release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
sizeof(struct ESP_regs));
return 0;
}
/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
/* Since the Blizzard DMA is fully dedicated to the ESP chip,
* the number of bytes sent (to the ESP chip) equals the number
* of bytes in the FIFO - there is no buffering in the DMA controller.
* XXXX Do I read this right? It is from host to ESP, right?
*/
return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
/* I don't think there's any limit on the Blizzard DMA. So we use what
* the ESP chip can handle (24 bit).
*/
unsigned long sz = sp->SCp.this_residual;
if(sz > 0x1000000)
sz = 0x1000000;
return sz;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
amiga_custom.intreqr, amiga_custom.intenar));
}
void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
#if MKIV
struct blz1230_dma_registers *dregs =
(struct blz1230_dma_registers *) (esp->dregs);
#else
struct blz1230II_dma_registers *dregs =
(struct blz1230II_dma_registers *) (esp->dregs);
#endif
cache_clear(addr, length);
addr >>= 1;
addr &= ~(BLZ1230_DMA_WRITE);
/* First set latch */
dregs->dma_latch = (addr >> 24) & 0xff;
/* Then pump the address to the DMA address register */
#if MKIV
dregs->dma_addr = (addr >> 24) & 0xff;
#endif
dregs->dma_addr = (addr >> 16) & 0xff;
dregs->dma_addr = (addr >> 8) & 0xff;
dregs->dma_addr = (addr ) & 0xff;
}
void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
#if MKIV
struct blz1230_dma_registers *dregs =
(struct blz1230_dma_registers *) (esp->dregs);
#else
struct blz1230II_dma_registers *dregs =
(struct blz1230II_dma_registers *) (esp->dregs);
#endif
cache_push(addr, length);
addr >>= 1;
addr |= BLZ1230_DMA_WRITE;
/* First set latch */
dregs->dma_latch = (addr >> 24) & 0xff;
/* Then pump the address to the DMA address register */
#if MKIV
dregs->dma_addr = (addr >> 24) & 0xff;
#endif
dregs->dma_addr = (addr >> 16) & 0xff;
dregs->dma_addr = (addr >> 8) & 0xff;
dregs->dma_addr = (addr ) & 0xff;
}
static void dma_ints_off(struct NCR_ESP *esp)
{
disable_irq(esp->irq);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
enable_irq(esp->irq);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
}
static int dma_ports_p(struct NCR_ESP *esp)
{
return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if(write){
dma_init_read(esp, addr, count);
} else {
dma_init_write(esp, addr, count);
}
}
#define HOSTS_C
int blz1230_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
esp_deallocate((struct NCR_ESP *)instance->hostdata);
esp_release();
release_mem_region(address, sizeof(struct ESP_regs));
free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
return 1;
}
static struct scsi_host_template driver_template = {
.proc_name = "esp-blz1230",
.proc_info = esp_proc_info,
.name = "Blizzard1230 SCSI IV",
.detect = blz1230_esp_detect,
.slave_alloc = esp_slave_alloc,
.slave_destroy = esp_slave_destroy,
.release = blz1230_esp_release,
.queuecommand = esp_queue,
.eh_abort_handler = esp_abort,
.eh_bus_reset_handler = esp_reset,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");
/* blz2060.c: Driver for Blizzard 2060 SCSI Controller.
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on the CyberStorm driver, hence the occasional
* reference to CyberStorm.
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
 * 2) Make as few routines as possible required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define BLZ2060_ESP_ADDR 0x1ff00
#define BLZ2060_DMA_ADDR 0x1ffe0
/* The Blizzard 2060 DMA interface
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Only two things can be programmed in the Blizzard DMA:
* 1) The data direction is controlled by the status of bit 31 (1 = write)
* 2) The source/dest address (word aligned, shifted one right) in bits 30-0
*
* Figure out interrupt status by reading the ESP status byte.
*/
struct blz2060_dma_registers {
volatile unsigned char dma_led_ctrl; /* DMA led control [0x000] */
unsigned char dmapad1[0x0f];
volatile unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
unsigned char dmapad2[0x03];
volatile unsigned char dma_addr1; /* DMA address [0x014] */
unsigned char dmapad3[0x03];
volatile unsigned char dma_addr2; /* DMA address [0x018] */
unsigned char dmapad4[0x03];
volatile unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
};
#define BLZ2060_DMA_WRITE 0x80000000
/* DMA control bits */
#define BLZ2060_DMA_LED 0x02 /* HD led control 1 = off */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/***************************************************************** Detection */
int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
{
struct NCR_ESP *esp;
struct zorro_dev *z = NULL;
unsigned long address;
if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_2060, z))) {
unsigned long board = z->resource.start;
if (request_mem_region(board+BLZ2060_ESP_ADDR,
sizeof(struct ESP_regs), "NCR53C9x")) {
esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0);
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
esp->dma_dump_state = &dma_dump_state;
esp->dma_init_read = &dma_init_read;
esp->dma_init_write = &dma_init_write;
esp->dma_ints_off = &dma_ints_off;
esp->dma_ints_on = &dma_ints_on;
esp->dma_irq_p = &dma_irq_p;
esp->dma_ports_p = &dma_ports_p;
esp->dma_setup = &dma_setup;
/* Optional functions */
esp->dma_barrier = 0;
esp->dma_drain = 0;
esp->dma_invalidate = 0;
esp->dma_irq_entry = 0;
esp->dma_irq_exit = 0;
esp->dma_led_on = &dma_led_on;
esp->dma_led_off = &dma_led_off;
esp->dma_poll = 0;
esp->dma_reset = 0;
/* SCSI chip speed */
esp->cfreq = 40000000;
/* The DMA registers on the Blizzard are mapped
* relative to the device (i.e. in the same Zorro
* I/O block).
*/
address = (unsigned long)ZTWO_VADDR(board);
esp->dregs = (void *)(address + BLZ2060_DMA_ADDR);
/* ESP register base */
esp->eregs = (struct ESP_regs *)(address + BLZ2060_ESP_ADDR);
/* Set the command buffer */
esp->esp_command = cmd_buffer;
esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
"Blizzard 2060 SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
/* We don't have a differential SCSI-bus. */
esp->diff = 0;
esp_initialize(esp);
printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
esps_running = esps_in_use;
return esps_in_use;
}
}
return 0;
}
/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
/* Since the Blizzard DMA is fully dedicated to the ESP chip,
* the number of bytes sent (to the ESP chip) equals the number
* of bytes in the FIFO - there is no buffering in the DMA controller.
* XXXX Do I read this right? It is from host to ESP, right?
*/
return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
/* I don't think there's any limit on the Blizzard DMA. So we use what
* the ESP chip can handle (24 bit).
*/
unsigned long sz = sp->SCp.this_residual;
if(sz > 0x1000000)
sz = 0x1000000;
return sz;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
struct blz2060_dma_registers *dregs =
(struct blz2060_dma_registers *) (esp->dregs);
cache_clear(addr, length);
addr >>= 1;
addr &= ~(BLZ2060_DMA_WRITE);
dregs->dma_addr3 = (addr ) & 0xff;
dregs->dma_addr2 = (addr >> 8) & 0xff;
dregs->dma_addr1 = (addr >> 16) & 0xff;
dregs->dma_addr0 = (addr >> 24) & 0xff;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
struct blz2060_dma_registers *dregs =
(struct blz2060_dma_registers *) (esp->dregs);
cache_push(addr, length);
addr >>= 1;
addr |= BLZ2060_DMA_WRITE;
dregs->dma_addr3 = (addr ) & 0xff;
dregs->dma_addr2 = (addr >> 8) & 0xff;
dregs->dma_addr1 = (addr >> 16) & 0xff;
dregs->dma_addr0 = (addr >> 24) & 0xff;
}
static void dma_ints_off(struct NCR_ESP *esp)
{
disable_irq(esp->irq);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
enable_irq(esp->irq);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
}
static void dma_led_off(struct NCR_ESP *esp)
{
((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl =
BLZ2060_DMA_LED;
}
static void dma_led_on(struct NCR_ESP *esp)
{
((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 0;
}
static int dma_ports_p(struct NCR_ESP *esp)
{
return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if(write){
dma_init_read(esp, addr, count);
} else {
dma_init_write(esp, addr, count);
}
}
#define HOSTS_C
int blz2060_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
esp_deallocate((struct NCR_ESP *)instance->hostdata);
esp_release();
release_mem_region(address, sizeof(struct ESP_regs));
free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
return 1;
}
static struct scsi_host_template driver_template = {
.proc_name = "esp-blz2060",
.proc_info = esp_proc_info,
.name = "Blizzard2060 SCSI",
.detect = blz2060_esp_detect,
.slave_alloc = esp_slave_alloc,
.slave_destroy = esp_slave_destroy,
.release = blz2060_esp_release,
.queuecommand = esp_queue,
.eh_abort_handler = esp_abort,
.eh_bus_reset_handler = esp_reset,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");
/* cyberstormII.c: Driver for CyberStorm SCSI Mk II
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on cyberstorm.c
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
 * 2) Make as few routines as possible required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define CYBERII_ESP_ADDR 0x1ff03
#define CYBERII_DMA_ADDR 0x1ff43
/* The CyberStorm II DMA interface */
struct cyberII_dma_registers {
volatile unsigned char cond_reg; /* DMA cond (ro) [0x000] */
#define ctrl_reg cond_reg /* DMA control (wo) [0x000] */
unsigned char dmapad4[0x3f];
volatile unsigned char dma_addr0; /* DMA address (MSB) [0x040] */
unsigned char dmapad1[3];
volatile unsigned char dma_addr1; /* DMA address [0x044] */
unsigned char dmapad2[3];
volatile unsigned char dma_addr2; /* DMA address [0x048] */
unsigned char dmapad3[3];
volatile unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */
};
/* DMA control bits */
#define CYBERII_DMA_LED 0x02 /* HD led control 1 = on */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/***************************************************************** Detection */
int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
{
struct NCR_ESP *esp;
struct zorro_dev *z = NULL;
unsigned long address;
struct ESP_regs *eregs;
if ((z = zorro_find_device(ZORRO_PROD_PHASE5_CYBERSTORM_MK_II, z))) {
unsigned long board = z->resource.start;
if (request_mem_region(board+CYBERII_ESP_ADDR,
sizeof(struct ESP_regs), "NCR53C9x")) {
/* Do some magic to figure out if the CyberStorm Mk II
* is equipped with a SCSI controller
*/
address = (unsigned long)ZTWO_VADDR(board);
eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR);
esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0);
esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
udelay(5);
if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) {
esp_deallocate(esp);
scsi_unregister(esp->ehost);
release_mem_region(board+CYBERII_ESP_ADDR,
sizeof(struct ESP_regs));
return 0; /* Bail out if address did not hold data */
}
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
esp->dma_dump_state = &dma_dump_state;
esp->dma_init_read = &dma_init_read;
esp->dma_init_write = &dma_init_write;
esp->dma_ints_off = &dma_ints_off;
esp->dma_ints_on = &dma_ints_on;
esp->dma_irq_p = &dma_irq_p;
esp->dma_ports_p = &dma_ports_p;
esp->dma_setup = &dma_setup;
/* Optional functions */
esp->dma_barrier = 0;
esp->dma_drain = 0;
esp->dma_invalidate = 0;
esp->dma_irq_entry = 0;
esp->dma_irq_exit = 0;
esp->dma_led_on = &dma_led_on;
esp->dma_led_off = &dma_led_off;
esp->dma_poll = 0;
esp->dma_reset = 0;
/* SCSI chip speed */
esp->cfreq = 40000000;
/* The DMA registers on the CyberStorm are mapped
* relative to the device (i.e. in the same Zorro
* I/O block).
*/
esp->dregs = (void *)(address + CYBERII_DMA_ADDR);
/* ESP register base */
esp->eregs = eregs;
/* Set the command buffer */
esp->esp_command = cmd_buffer;
esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
"CyberStorm SCSI Mk II", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
/* We don't have a differential SCSI-bus. */
esp->diff = 0;
esp_initialize(esp);
printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
esps_running = esps_in_use;
return esps_in_use;
}
}
return 0;
}
/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
/* Since the CyberStorm DMA is fully dedicated to the ESP chip,
* the number of bytes sent (to the ESP chip) equals the number
* of bytes in the FIFO - there is no buffering in the DMA controller.
* XXXX Do I read this right? It is from host to ESP, right?
*/
return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
/* I don't think there's any limit on the CyberDMA. So we use what
* the ESP chip can handle (24 bit).
*/
unsigned long sz = sp->SCp.this_residual;
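/* 0x1000000 == 2^24 == 16 MiB, the largest count a 24-bit transfer counter can express. */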
if(sz > 0x1000000)
sz = 0x1000000;
return sz;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
esp->esp_id, ((struct cyberII_dma_registers *)
(esp->dregs))->cond_reg));
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
struct cyberII_dma_registers *dregs =
(struct cyberII_dma_registers *) esp->dregs;
cache_clear(addr, length);
addr &= ~(1);
dregs->dma_addr0 = (addr >> 24) & 0xff;
dregs->dma_addr1 = (addr >> 16) & 0xff;
dregs->dma_addr2 = (addr >> 8) & 0xff;
dregs->dma_addr3 = (addr ) & 0xff;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
struct cyberII_dma_registers *dregs =
(struct cyberII_dma_registers *) esp->dregs;
cache_push(addr, length);
addr |= 1;
dregs->dma_addr0 = (addr >> 24) & 0xff;
dregs->dma_addr1 = (addr >> 16) & 0xff;
dregs->dma_addr2 = (addr >> 8) & 0xff;
dregs->dma_addr3 = (addr ) & 0xff;
}
static void dma_ints_off(struct NCR_ESP *esp)
{
disable_irq(esp->irq);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
enable_irq(esp->irq);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
/* It's important to check the DMA IRQ bit in the correct way! */
return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
}
static void dma_led_off(struct NCR_ESP *esp)
{
((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg &= ~CYBERII_DMA_LED;
}
static void dma_led_on(struct NCR_ESP *esp)
{
((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg |= CYBERII_DMA_LED;
}
static int dma_ports_p(struct NCR_ESP *esp)
{
return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if(write){
dma_init_read(esp, addr, count);
} else {
dma_init_write(esp, addr, count);
}
}
#define HOSTS_C
int cyberII_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
esp_deallocate((struct NCR_ESP *)instance->hostdata);
esp_release();
release_mem_region(address, sizeof(struct ESP_regs));
free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
return 1;
}
static struct scsi_host_template driver_template = {
.proc_name = "esp-cyberstormII",
.proc_info = esp_proc_info,
.name = "CyberStorm Mk II SCSI",
.detect = cyberII_esp_detect,
.slave_alloc = esp_slave_alloc,
.slave_destroy = esp_slave_destroy,
.release = cyberII_esp_release,
.queuecommand = esp_queue,
.eh_abort_handler = esp_abort,
.eh_bus_reset_handler = esp_reset,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");
......@@ -4267,7 +4267,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
int srb_idx = 0;
unsigned i = 0;
struct SGentry *ptr;
struct SGentry *uninitialized_var(ptr);
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
......
......@@ -629,8 +629,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
int rc;
if (tcp_conn->in.datalen) {
printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
tcp_conn->in.datalen);
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2t with datalen %d\n",
tcp_conn->in.datalen);
return ISCSI_ERR_DATALEN;
}
......@@ -644,8 +645,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
"recovery...\n", ctask->itt);
iscsi_conn_printk(KERN_INFO, conn,
"dropping R2T itt %d in recovery.\n",
ctask->itt);
return 0;
}
......@@ -655,7 +657,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->exp_statsn = rhdr->statsn;
r2t->data_length = be32_to_cpu(rhdr->data_length);
if (r2t->data_length == 0) {
printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
......@@ -668,9 +671,10 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
"offset %u and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_bufflen(ctask->sc));
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with data len %u at offset %u "
"and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_bufflen(ctask->sc));
__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
......@@ -736,8 +740,9 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/* verify PDU length */
tcp_conn->in.datalen = ntoh24(hdr->dlength);
if (tcp_conn->in.datalen > conn->max_recv_dlength) {
printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
tcp_conn->in.datalen, conn->max_recv_dlength);
iscsi_conn_printk(KERN_ERR, conn,
"iscsi_tcp: datalen %d > %d\n",
tcp_conn->in.datalen, conn->max_recv_dlength);
return ISCSI_ERR_DATALEN;
}
......@@ -819,10 +824,12 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
* For now we fail until we find a vendor that needs it
*/
if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
"but conn buffer is only %u (opcode %0x)\n",
tcp_conn->in.datalen,
ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
iscsi_conn_printk(KERN_ERR, conn,
"iscsi_tcp: received buffer of "
"len %u but conn buffer is only %u "
"(opcode %0x)\n",
tcp_conn->in.datalen,
ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
rc = ISCSI_ERR_PROTO;
break;
}
......@@ -1496,30 +1503,25 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->tx_hash.flags = 0;
if (IS_ERR(tcp_conn->tx_hash.tfm)) {
printk(KERN_ERR "Could not create connection due to crc32c "
"loading error %ld. Make sure the crc32c module is "
"built as a module or into the kernel\n",
PTR_ERR(tcp_conn->tx_hash.tfm));
if (IS_ERR(tcp_conn->tx_hash.tfm))
goto free_tcp_conn;
}
tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->rx_hash.flags = 0;
if (IS_ERR(tcp_conn->rx_hash.tfm)) {
printk(KERN_ERR "Could not create connection due to crc32c "
"loading error %ld. Make sure the crc32c module is "
"built as a module or into the kernel\n",
PTR_ERR(tcp_conn->rx_hash.tfm));
if (IS_ERR(tcp_conn->rx_hash.tfm))
goto free_tx_tfm;
}
return cls_conn;
free_tx_tfm:
crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn:
iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c "
"loading error. Make sure the crc32c "
"module is built as a module or into the "
"kernel\n");
kfree(tcp_conn);
tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn);
......@@ -1627,7 +1629,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
/* lookup for existing socket */
sock = sockfd_lookup((int)transport_eph, &err);
if (!sock) {
printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
iscsi_conn_printk(KERN_ERR, conn,
"sockfd_lookup failed %d\n", err);
return -EEXIST;
}
/*
......
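Editor's note: the reworked crc32c allocation failure path above follows the kernel's usual goto-unwind style: each failing step jumps to a label that frees only what was set up before it, and the error is reported in one place. A minimal stand-alone sketch of the idiom, with made-up names rather than the driver's types:

#include <stdlib.h>

struct conn {
	void *tx;
	void *rx;
};

static struct conn *conn_create(void)
{
	struct conn *c = malloc(sizeof(*c));

	if (!c)
		return NULL;

	c->tx = malloc(64);
	if (!c->tx)
		goto free_conn;

	c->rx = malloc(64);
	if (!c->rx)
		goto free_tx;

	return c;

free_tx:
	free(c->tx);
free_conn:
	/* one place to report the failure, as in the hunk above */
	free(c);
	return NULL;
}

int main(void)
{
	struct conn *c = conn_create();

	if (c) {
		free(c->rx);
		free(c->tx);
		free(c);
	}
	return 0;
}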
/* -*- mode: asm -*-
* Due to problems while transferring data, these routines are written in
* assembly.
* Since I'm no PPC assembler guru, the code is just the following C routines,
int oktag_to_io(long *paddr,long *addr,long len)
{
long *addr2 = addr;
for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
*paddr = *addr2++;
return addr2 - addr;
}
int oktag_from_io(long *addr,long *paddr,long len)
{
long *addr2 = addr;
for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
*addr2++ = *paddr;
return addr2 - addr;
}
* assembled using gcc -O2 -S, with two exception catch points where data
* is moved to/from the IO register.
*/
#ifdef CONFIG_APUS
.file "oktagon_io.c"
gcc2_compiled.:
/*
.section ".text"
*/
.align 2
.globl oktag_to_io
.type oktag_to_io,@function
oktag_to_io:
addi 5,5,3
srwi 5,5,2
cmpwi 1,5,0
mr 9,3
mr 3,4
addi 5,5,-1
bc 12,6,.L3
.L5:
cmpwi 1,5,0
lwz 0,0(3)
addi 3,3,4
addi 5,5,-1
exp1: stw 0,0(9)
bc 4,6,.L5
.L3:
ret1: subf 3,4,3
srawi 3,3,2
blr
.Lfe1:
.size oktag_to_io,.Lfe1-oktag_to_io
.align 2
.globl oktag_from_io
.type oktag_from_io,@function
oktag_from_io:
addi 5,5,3
srwi 5,5,2
cmpwi 1,5,0
mr 9,3
addi 5,5,-1
bc 12,6,.L9
.L11:
cmpwi 1,5,0
exp2: lwz 0,0(4)
addi 5,5,-1
stw 0,0(3)
addi 3,3,4
bc 4,6,.L11
.L9:
ret2: subf 3,9,3
srawi 3,3,2
blr
.Lfe2:
.size oktag_from_io,.Lfe2-oktag_from_io
.ident "GCC: (GNU) egcs-2.90.29 980515 (egcs-1.0.3 release)"
/*
* Exception table.
* The second longword of each entry is the address to jump to when an
* exception is caught at the address named by the first longword.
*/
.section __ex_table,"a"
.align 2
oktagon_except:
.long exp1,ret1
.long exp2,ret2
#else
/*
The code which follows is 680x0 assembly, meant for Linux/m68k. It was
created by cross-compiling the C code above using the instructions given
there. I then added the four labels used in the
exception handler table at the bottom of this file.
- Kevin <kcozens@interlog.com>
*/
#ifdef CONFIG_AMIGA
.file "oktagon_io.c"
.version "01.01"
gcc2_compiled.:
.text
.align 2
.globl oktag_to_io
.type oktag_to_io,@function
oktag_to_io:
link.w %a6,#0
move.l %d2,-(%sp)
move.l 8(%a6),%a1
move.l 12(%a6),%d1
move.l %d1,%a0
move.l 16(%a6),%d0
addq.l #3,%d0
lsr.l #2,%d0
subq.l #1,%d0
moveq.l #-1,%d2
cmp.l %d0,%d2
jbeq .L3
.L5:
exp1:
move.l (%a0)+,(%a1)
dbra %d0,.L5
clr.w %d0
subq.l #1,%d0
jbcc .L5
.L3:
ret1:
move.l %a0,%d0
sub.l %d1,%d0
asr.l #2,%d0
move.l -4(%a6),%d2
unlk %a6
rts
.Lfe1:
.size oktag_to_io,.Lfe1-oktag_to_io
.align 2
.globl oktag_from_io
.type oktag_from_io,@function
oktag_from_io:
link.w %a6,#0
move.l %d2,-(%sp)
move.l 8(%a6),%d1
move.l 12(%a6),%a1
move.l %d1,%a0
move.l 16(%a6),%d0
addq.l #3,%d0
lsr.l #2,%d0
subq.l #1,%d0
moveq.l #-1,%d2
cmp.l %d0,%d2
jbeq .L9
.L11:
exp2:
move.l (%a1),(%a0)+
dbra %d0,.L11
clr.w %d0
subq.l #1,%d0
jbcc .L11
.L9:
ret2:
move.l %a0,%d0
sub.l %d1,%d0
asr.l #2,%d0
move.l -4(%a6),%d2
unlk %a6
rts
.Lfe2:
.size oktag_from_io,.Lfe2-oktag_from_io
.ident "GCC: (GNU) 2.7.2.1"
/*
* Exception table.
* The second longword of each entry is the address to jump to when an
* exception is caught at the address named by the first longword.
*/
.section __ex_table,"a"
.align 2
oktagon_except:
.long exp1,ret1
.long exp2,ret2
#endif
#endif
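Editor's note: as the exception-table comments above describe, each __ex_table entry pairs the address of an instruction that may fault with the address execution should resume at. A minimal user-space sketch of that lookup, purely illustrative (the kernel keeps the table sorted and binary-searches it; the field and function names below are made up):

#include <stdio.h>

struct ex_entry {
	unsigned long insn;	/* first longword: address that may fault  */
	unsigned long fixup;	/* second longword: address to continue at */
};

static unsigned long find_fixup(const struct ex_entry *tbl, int n,
				unsigned long fault_addr)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == fault_addr)
			return tbl[i].fixup;
	return 0;	/* no fixup registered: the fault is fatal */
}

int main(void)
{
	/* stand-ins for the exp1/ret1 and exp2/ret2 pairs above */
	struct ex_entry table[] = {
		{ 0x1000, 0x1040 },
		{ 0x2000, 0x2080 },
	};

	printf("fixup for 0x2000 -> 0x%lx\n", find_fixup(table, 2, 0x2000));
	return 0;
}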
......@@ -35,7 +35,7 @@
#define BOUNCE_SIZE (64*1024)
#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE)
#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
struct ps3rom_private {
......
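Editor's note: the replaced definition expressed the transfer limit in 2048-byte CD frames, while the new one expresses it in the 512-byte sectors the block layer counts in; both span the same 64 KiB bounce buffer. A quick stand-alone check of the arithmetic, assuming the usual CD_FRAMESIZE of 2048 from <linux/cdrom.h>:

#include <stdio.h>

#define BOUNCE_SIZE	(64 * 1024)
#define CD_FRAMESIZE	2048	/* assumed value */

int main(void)
{
	printf("old limit: %d frames of 2048 bytes\n", BOUNCE_SIZE / CD_FRAMESIZE);	/* 32  */
	printf("new limit: %d sectors of 512 bytes\n", BOUNCE_SIZE >> 9);		/* 128 */
	return 0;
}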
......@@ -428,6 +428,19 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
return 0;
if (ha->sfp_data)
goto do_read;
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->sfp_data_dma);
if (!ha->sfp_data) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate memory for SFP read-data.\n");
return 0;
}
do_read:
memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
addr = 0xa0;
for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
iter++, offset += SFP_BLOCK_SIZE) {
......@@ -835,7 +848,7 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
uint32_t speed = 0;
switch (ha->link_data_rate) {
......@@ -848,6 +861,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_4GB:
speed = 4;
break;
case PORT_SPEED_8GB:
speed = 8;
break;
}
fc_host_speed(shost) = speed;
}
......@@ -855,7 +871,7 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
uint32_t port_type = FC_PORTTYPE_UNKNOWN;
switch (ha->current_topology) {
......@@ -965,7 +981,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
int rval;
struct link_statistics *stats;
dma_addr_t stats_dma;
......@@ -1049,7 +1065,7 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
if (!ha->flags.online)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
......
......@@ -2041,8 +2041,6 @@ typedef struct vport_params {
#define VP_RET_CODE_NO_MEM 5
#define VP_RET_CODE_NOT_FOUND 6
#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
/*
* ISP operations
*/
......
......@@ -66,6 +66,7 @@ extern int ql2xqfullrampup;
extern int num_hosts;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
/*
* Global Functions in qla_mid.c source file.
......
......@@ -119,6 +119,13 @@ static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
qla2x00_get_firmware_state(ha, &fw_state);
}
static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *);
static __inline__ scsi_qla_host_t *
to_qla_parent(scsi_qla_host_t *ha)
{
return ha->parent ? ha->parent : ha;
}
/**
* qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
* @ha: HA context
......
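Editor's note: the hunks above route the fc_host attribute queries through to_qla_parent(), now a static inline here rather than the macro removed from qla_def.h, so that an NPIV virtual port reads the state of the physical adapter that actually owns the link. A minimal sketch of the parent-or-self idiom with made-up types (not the driver's real structures):

struct host {
	struct host *parent;		/* NULL on the physical port, set on NPIV vports */
	unsigned int link_data_rate;
};

/* Resolve a (possibly virtual) host to the physical adapter. */
static inline struct host *to_parent(struct host *h)
{
	return h->parent ? h->parent : h;
}

static unsigned int host_speed(struct host *h)
{
	return to_parent(h)->link_data_rate;	/* vports report the parent's state */
}

int main(void)
{
	struct host phys = { 0, 4 };
	struct host vport = { &phys, 0 };

	return host_speed(&vport) == 4 ? 0 : 1;
}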