Commit cd9ad58d authored by David S. Miller

[SCSI] SUNESP: Complete driver rewrite to version 2.0

Major features:

1) Tagged queuing support.
2) Will properly negotiate for synchronous transfers even on
   devices that reject the wide negotiation message, such as
   CDROMs.
3) Significantly lower kernel stack usage in the interrupt
   handler path, achieved by eliminating the function vector
   arrays in favor of a top-level switch-statement state machine.
4) Uses generic scsi infrastructure as much as possible to
   avoid code duplication.
5) Automatic request of sense data in response to CHECK_CONDITION.
6) Portable to other platforms using ESP such as DEC and Sun3
   systems (a sketch of the per-platform hook vector follows this
   list).
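
For illustration, a sketch of the per-platform hook vector the new core
drives; every member below corresponds to an esp->ops-> call that appears
in esp_scsi.c later in this commit. The structure name and member order
are assumptions for the sketch only; the authoritative definition lives
in esp_scsi.h (not shown here).

struct esp;	/* core driver state, defined in esp_scsi.h */

struct esp_driver_ops {
	u8	(*esp_read8)(struct esp *esp, unsigned long reg);
	void	(*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
	int	(*map_sg)(struct esp *esp, struct scatterlist *sg,
			  int num_sg, int dir);
	void	(*unmap_sg)(struct esp *esp, struct scatterlist *sg,
			    int num_sg, int dir);
	dma_addr_t (*map_single)(struct esp *esp, void *buf,
				 size_t sz, int dir);
	void	(*unmap_single)(struct esp *esp, dma_addr_t addr,
				size_t sz, int dir);
	int	(*irq_pending)(struct esp *esp);
	int	(*dma_error)(struct esp *esp);
	void	(*dma_drain)(struct esp *esp);
	void	(*dma_invalidate)(struct esp *esp);
	void	(*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count,
				u32 dma_count, int write, u8 cmd);
};

A DEC or Sun3 port would fill this in with its own register accessors and
DMA glue while reusing the SCSI state machine in esp_scsi.c unchanged.
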
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 16ce82d8
@@ -1763,9 +1763,15 @@ config SUN3X_ESP
 	  The ESP was an on-board SCSI controller used on Sun 3/80
 	  machines. Say Y here to compile in support for it.
 
+config SCSI_ESP_CORE
+	tristate "ESP Scsi Driver Core"
+	depends on SCSI
+	select SCSI_SPI_ATTRS
+
 config SCSI_SUNESP
 	tristate "Sparc ESP Scsi Driver"
 	depends on SBUS && SCSI
+	select SCSI_ESP_CORE
 	help
 	  This is the driver for the Sun ESP SCSI host adapter. The ESP
 	  chipset is present in most SPARC SBUS-based computers.
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
 obj-$(CONFIG_SCSI_ACARD)	+= atp870u.o
-obj-$(CONFIG_SCSI_SUNESP)	+= esp.o
+obj-$(CONFIG_SCSI_ESP_CORE)	+= esp_scsi.o
+obj-$(CONFIG_SCSI_SUNESP)	+= sun_esp.o
 obj-$(CONFIG_SCSI_GDTH)		+= gdth.o
 obj-$(CONFIG_SCSI_INITIO)	+= initio.o
 obj-$(CONFIG_SCSI_INIA100)	+= a100u2w.o
/* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $
* esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI
* Processor) driver under Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_ESP_H
#define _SPARC_ESP_H
/* For dvma controller register definitions. */
#include <asm/dma.h>
/* The ESP SCSI controllers have their register sets in three
* "classes":
*
* 1) Registers which are both read and write.
* 2) Registers which are read only.
* 3) Registers which are write only.
*
* Yet, they all live within the same IO space.
*/
/* All the ESP registers are one byte each and are accessed longwords
* apart with a big-endian ordering to the bytes.
*/
/* Access Description Offset */
#define ESP_TCLOW 0x00UL /* rw Low bits of the transfer count 0x00 */
#define ESP_TCMED 0x04UL /* rw Mid bits of the transfer count 0x04 */
#define ESP_FDATA 0x08UL /* rw FIFO data bits 0x08 */
#define ESP_CMD 0x0cUL /* rw SCSI command bits 0x0c */
#define ESP_STATUS 0x10UL /* ro ESP status register 0x10 */
#define ESP_BUSID ESP_STATUS /* wo Bus ID for select/reselect 0x10 */
#define ESP_INTRPT 0x14UL /* ro Kind of interrupt 0x14 */
#define ESP_TIMEO ESP_INTRPT /* wo Timeout value for select/resel 0x14 */
#define ESP_SSTEP 0x18UL /* ro Sequence step register 0x18 */
#define ESP_STP ESP_SSTEP /* wo Transfer period per sync 0x18 */
#define ESP_FFLAGS 0x1cUL /* ro Bits of current FIFO info 0x1c */
#define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */
#define ESP_CFG1 0x20UL /* rw First configuration register 0x20 */
#define ESP_CFACT 0x24UL /* wo Clock conversion factor 0x24 */
#define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */
#define ESP_CTEST 0x28UL /* wo Chip test register 0x28 */
#define ESP_CFG2 0x2cUL /* rw Second configuration register 0x2c */
#define ESP_CFG3 0x30UL /* rw Third configuration register 0x30 */
#define ESP_TCHI 0x38UL /* rw High bits of transfer count 0x38 */
#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
#define ESP_FGRND 0x3cUL /* rw Data base for fifo 0x3c */
#define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */
#define ESP_REG_SIZE 0x40UL
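/* Illustrative accessor (not part of the original header): each register
 * is one byte wide, but the ESP_* offsets above are already spaced one
 * longword apart, so a memory-mapped read simply adds the offset to the
 * base mapping.  The helper name and the bare readb() are assumptions
 * for this sketch; the real drivers go through their bus-specific
 * accessors (the esp->ops hooks in esp_scsi.c).
 */
static inline u8 example_esp_read8(void __iomem *eregs, unsigned long reg)
{
	return readb(eregs + reg);	/* e.g. reg == ESP_STATUS (0x10) */
}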
/* Various revisions of the ESP board. */
enum esp_rev {
esp100 = 0x00, /* NCR53C90 - very broken */
esp100a = 0x01, /* NCR53C90A */
esp236 = 0x02,
fas236 = 0x03,
fas100a = 0x04,
fast = 0x05,
fashme = 0x06,
espunknown = 0x07
};
/* We allocate one of these for each scsi device and attach it to
* SDptr->hostdata for use in the driver
*/
struct esp_device {
unsigned char sync_min_period;
unsigned char sync_max_offset;
unsigned sync:1;
unsigned wide:1;
unsigned disconnect:1;
};
struct scsi_cmnd;
/* We get one of these for each ESP probed. */
struct esp {
void __iomem *eregs; /* ESP controller registers */
void __iomem *dregs; /* DMA controller registers */
struct sbus_dma *dma; /* DMA controller sw state */
struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
struct sbus_dev *sdev; /* Pointer to SBus entry */
/* ESP Configuration Registers */
u8 config1; /* Copy of the 1st config register */
u8 config2; /* Copy of the 2nd config register */
u8 config3[16]; /* Copy of the 3rd config register */
/* The current command we are sending to the ESP chip. This esp_command
* ptr needs to be mapped in DVMA area so we can send commands and read
* from the ESP fifo without burning precious CPU cycles. Programmed I/O
* sucks when we have the DVMA to do it for us. The ESP is stupid and will
* only send out 6, 10, and 12 byte SCSI commands, others we need to send
* one byte at a time. esp_slowcmd being set says that we are doing one
* of the command types ESP doesn't understand, esp_scmdp keeps track of
* which byte we are sending, esp_scmdleft says how many bytes to go.
*/
volatile u8 *esp_command; /* Location of command (CPU view) */
__u32 esp_command_dvma;/* Location of command (DVMA view) */
unsigned char esp_clen; /* Length of this command */
unsigned char esp_slowcmd;
unsigned char *esp_scmdp;
unsigned char esp_scmdleft;
/* The following are used to determine the cause of an IRQ. Upon every
* IRQ entry we synchronize these with the hardware registers.
*/
u8 ireg; /* Copy of ESP interrupt register */
u8 sreg; /* Copy of ESP status register */
u8 seqreg; /* Copy of ESP sequence step register */
u8 sreg2; /* Copy of HME status2 register */
/* To save register writes to the ESP, which can be expensive, we
* keep track of the previous value that various registers had for
* the last target we connected to. If they are the same for the
* current target, we skip the register writes as they are not needed.
*/
u8 prev_soff, prev_stp;
u8 prev_cfg3, __cache_pad;
/* We also keep a cache of the previous FAS/HME DMA CSR register value. */
u32 prev_hme_dmacsr;
/* The HME is the biggest piece of shit I have ever seen. */
u8 hme_fifo_workaround_buffer[16 * 2];
u8 hme_fifo_workaround_count;
/* For each target we keep track of save/restore data
* pointer information. This needs to be updated majorly
* when we add support for tagged queueing. -DaveM
*/
struct esp_pointers {
char *saved_ptr;
struct scatterlist *saved_buffer;
int saved_this_residual;
int saved_buffers_residual;
} data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/;
/* Clock periods, frequencies, synchronization, etc. */
unsigned int cfreq; /* Clock frequency in HZ */
unsigned int cfact; /* Clock conversion factor */
unsigned int raw_cfact; /* Raw copy from probing */
unsigned int ccycle; /* One ESP clock cycle */
unsigned int ctick; /* One ESP clock time */
unsigned int radelay; /* FAST chip req/ack delay */
unsigned int neg_defp; /* Default negotiation period */
unsigned int sync_defp; /* Default sync transfer period */
unsigned int max_period; /* longest our period can be */
unsigned int min_period; /* shortest period we can withstand */
struct esp *next; /* Next ESP we probed or NULL */
char prom_name[64]; /* Name of ESP device from prom */
int prom_node; /* Prom node where ESP found */
int esp_id; /* Unique per-ESP ID number */
/* For slow to medium speed input clock rates we shoot for 5mb/s,
* but for high input clock rates we try to do 10mb/s although I
* don't think a transfer can even run that fast with an ESP even
* with DMA2 scatter gather pipelining.
*/
#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
unsigned int snip; /* Sync. negotiation in progress */
unsigned int wnip; /* WIDE negotiation in progress */
unsigned int targets_present;/* targets spoken to before */
int current_transfer_size; /* Set at beginning of data dma */
u8 espcmdlog[32]; /* Log of current esp cmds sent. */
u8 espcmdent; /* Current entry in esp cmd log. */
/* Misc. info about this ESP */
enum esp_rev erev; /* ESP revision */
int irq; /* SBus IRQ for this ESP */
int scsi_id; /* Who am I as initiator? */
int scsi_id_mask; /* Bitmask of 'me'. */
int diff; /* Differential SCSI bus? */
int bursts; /* Burst sizes our DVMA supports */
/* Our command queues, only one cmd lives in the current_SC queue. */
struct scsi_cmnd *issue_SC; /* Commands to be issued */
struct scsi_cmnd *current_SC; /* Who is currently working the bus */
struct scsi_cmnd *disconnected_SC;/* Commands disconnected from the bus */
/* Message goo */
u8 cur_msgout[16];
u8 cur_msgin[16];
u8 prevmsgout, prevmsgin;
u8 msgout_len, msgin_len;
u8 msgout_ctr, msgin_ctr;
/* States that we cannot keep in the per cmd structure because they
* cannot be associated with any specific command.
*/
u8 resetting_bus;
wait_queue_head_t reset_queue;
};
/* Bitfield meanings for the above registers. */
/* ESP config reg 1, read-write, found on all ESP chips */
#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */
#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */
#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */
#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */
#define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */
#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */
#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
#define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */
#define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */
#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
/* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */
#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */
#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */
#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */
#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */
#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */
#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */
#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */
#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */
#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */
#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */
#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */
#define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */
#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */
#define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */
#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
/* ESP command register read-write */
/* Group 1 commands: These may be sent at any point in time to the ESP
* chip. None of them can generate interrupts 'cept
* the "SCSI bus reset" command if you have not disabled
* SCSI reset interrupts in the config1 ESP register.
*/
#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
#define ESP_CMD_RC 0x02 /* Chip reset */
#define ESP_CMD_RS 0x03 /* SCSI bus reset */
/* Group 2 commands: ESP must be an initiator and connected to a target
* for these commands to work.
*/
#define ESP_CMD_TI 0x10 /* Transfer Information */
#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
#define ESP_CMD_SATN 0x1a /* Set ATN */
#define ESP_CMD_RATN 0x1b /* De-assert ATN */
/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
* to a target as the initiator for these commands to work.
*/
#define ESP_CMD_SMSG 0x20 /* Send message */
#define ESP_CMD_SSTAT 0x21 /* Send status */
#define ESP_CMD_SDATA 0x22 /* Send data */
#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
#define ESP_CMD_DCNCT 0x27 /* Disconnect */
#define ESP_CMD_RMSG 0x28 /* Receive Message */
#define ESP_CMD_RCMD 0x29 /* Receive Command */
#define ESP_CMD_RDATA 0x2a /* Receive Data */
#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
/* Group 4 commands: The ESP must be in the disconnected state and must
* not be connected to any targets as initiator for
* these commands to work.
*/
#define ESP_CMD_RSEL 0x40 /* Reselect */
#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
#define ESP_CMD_SELA 0x42 /* Select w/ATN */
#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
#define ESP_CMD_ESEL 0x44 /* Enable selection */
#define ESP_CMD_DSEL 0x45 /* Disable selections */
#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
/* This bit enables the ESP's DMA on the SBus */
#define ESP_CMD_DMA 0x80 /* Do DMA? */
/* ESP status register read-only */
#define ESP_STAT_PIO 0x01 /* IO phase bit */
#define ESP_STAT_PCD 0x02 /* CD phase bit */
#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
#define ESP_STAT_PERR 0x20 /* Parity error */
#define ESP_STAT_SPAM 0x40 /* Real bad error */
/* This indicates the 'interrupt pending' condition on esp236, it is a reserved
* bit on other revs of the ESP.
*/
#define ESP_STAT_INTR 0x80 /* Interrupt */
/* HME only: status 2 register */
#define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */
#define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */
#define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */
#define ESP_STAT2_CREGA 0x08 /* The command reg is active now */
#define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */
#define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */
#define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */
#define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */
/* The status register can be masked with ESP_STAT_PMASK and compared
* with the following values to determine the current phase the ESP
* (at least thinks it) is in. For our purposes we also add our own
* software 'done' bit for our phase management engine.
*/
#define ESP_DOP (0) /* Data Out */
#define ESP_DIP (ESP_STAT_PIO) /* Data In */
#define ESP_CMDP (ESP_STAT_PCD) /* Command */
#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
/* ESP interrupt register read-only */
#define ESP_INTR_S 0x01 /* Select w/o ATN */
#define ESP_INTR_SATN 0x02 /* Select w/ATN */
#define ESP_INTR_RSEL 0x04 /* Reselected */
#define ESP_INTR_FDONE 0x08 /* Function done */
#define ESP_INTR_BSERV 0x10 /* Bus service */
#define ESP_INTR_DC 0x20 /* Disconnect */
#define ESP_INTR_IC 0x40 /* Illegal command given */
#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
/* Interrupt status macros */
#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
(ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
/* ESP sequence step register read-only */
#define ESP_STEP_VBITS 0x07 /* Valid bits */
#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
#define ESP_STEP_SID 0x01 /* One msg byte sent */
#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
* bytes to be lost
*/
#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
/* Ho hum, some ESP's set the step register to this as well... */
#define ESP_STEP_FINI5 0x05
#define ESP_STEP_FINI6 0x06
#define ESP_STEP_FINI7 0x07
/* ESP chip-test register read-write */
#define ESP_TEST_TARG 0x01 /* Target test mode */
#define ESP_TEST_INI 0x02 /* Initiator test mode */
#define ESP_TEST_TS 0x04 /* Tristate test mode */
/* ESP unique ID register read-only, found on fas236+fas100a only */
#define ESP_UID_F100A 0x00 /* ESP FAS100A */
#define ESP_UID_F236 0x02 /* ESP FAS236 */
#define ESP_UID_REV 0x07 /* ESP revision */
#define ESP_UID_FAM 0xf8 /* ESP family */
/* ESP fifo flags register read-only */
/* Note that the following implies a 16 byte FIFO on the ESP. */
#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */
#define ESP_FF_SSTEP 0xe0 /* Sequence step */
/* ESP clock conversion factor register write-only */
#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
#define ESP_CCF_F2 0x02 /* 10MHz */
#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
/* HME only... */
#define ESP_BUSID_RESELID 0x10
#define ESP_BUSID_CTR32BIT 0x40
#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
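/* Worked example (illustrative, not from the original header): for a
 * 40 MHz chip clock, ESP_MHZ_TO_CYCLE(40000000) = 10^9 / 40000 = 25000,
 * i.e. a 25 ns clock cycle expressed in picoseconds, and, assuming the
 * conventional conversion factor of 8 for the 35-40 MHz band,
 * ESP_NEG_DEFP(40000000, 8) = (275 * 40000) / (8192 * 8) = 167, the
 * 8-bit value written to ESP_TIMEO for a 275 ms selection timeout.
 */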
#endif /* !(_SPARC_ESP_H) */
/* esp_scsi.c: ESP SCSI driver.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
#include "esp_scsi.h"
#define DRV_MODULE_NAME "esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "2.000"
#define DRV_MODULE_RELDATE "April 19, 2007"
/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;
static u32 esp_debug;
#define ESP_DEBUG_INTR 0x00000001
#define ESP_DEBUG_SCSICMD 0x00000002
#define ESP_DEBUG_RESET 0x00000004
#define ESP_DEBUG_MSGIN 0x00000008
#define ESP_DEBUG_MSGOUT 0x00000010
#define ESP_DEBUG_CMDDONE 0x00000020
#define ESP_DEBUG_DISCONNECT 0x00000040
#define ESP_DEBUG_DATASTART 0x00000080
#define ESP_DEBUG_DATADONE 0x00000100
#define ESP_DEBUG_RECONNECT 0x00000200
#define ESP_DEBUG_AUTOSENSE 0x00000400
#define esp_log_intr(f, a...) \
do { if (esp_debug & ESP_DEBUG_INTR) \
printk(f, ## a); \
} while (0)
#define esp_log_reset(f, a...) \
do { if (esp_debug & ESP_DEBUG_RESET) \
printk(f, ## a); \
} while (0)
#define esp_log_msgin(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGIN) \
printk(f, ## a); \
} while (0)
#define esp_log_msgout(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGOUT) \
printk(f, ## a); \
} while (0)
#define esp_log_cmddone(f, a...) \
do { if (esp_debug & ESP_DEBUG_CMDDONE) \
printk(f, ## a); \
} while (0)
#define esp_log_disconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
printk(f, ## a); \
} while (0)
#define esp_log_datastart(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATASTART) \
printk(f, ## a); \
} while (0)
#define esp_log_datadone(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATADONE) \
printk(f, ## a); \
} while (0)
#define esp_log_reconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_RECONNECT) \
printk(f, ## a); \
} while (0)
#define esp_log_autosense(f, a...) \
do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
printk(f, ## a); \
} while (0)
#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
static void esp_log_fill_regs(struct esp *esp,
struct esp_event_ent *p)
{
p->sreg = esp->sreg;
p->seqreg = esp->seqreg;
p->sreg2 = esp->sreg2;
p->ireg = esp->ireg;
p->select_state = esp->select_state;
p->event = esp->event;
}
void scsi_esp_cmd(struct esp *esp, u8 val)
{
struct esp_event_ent *p;
int idx = esp->esp_event_cur;
p = &esp->esp_event_log[idx];
p->type = ESP_EVENT_TYPE_CMD;
p->val = val;
esp_log_fill_regs(esp, p);
esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
static void esp_event(struct esp *esp, u8 val)
{
struct esp_event_ent *p;
int idx = esp->esp_event_cur;
p = &esp->esp_event_log[idx];
p->type = ESP_EVENT_TYPE_EVENT;
p->val = val;
esp_log_fill_regs(esp, p);
esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
esp->event = val;
}
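/* Note: scsi_esp_cmd() and esp_event() above treat esp_event_log[] as a
 * ring buffer whose index wraps with a bitmask rather than a modulo, so
 * ESP_EVENT_LOG_SZ (presumably defined in esp_scsi.h) must be a power
 * of two.
 */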
static void esp_dump_cmd_log(struct esp *esp)
{
int idx = esp->esp_event_cur;
int stop = idx;
printk(KERN_INFO PFX "esp%d: Dumping command log\n",
esp->host->unique_id);
do {
struct esp_event_ent *p = &esp->esp_event_log[idx];
printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
esp->host->unique_id, idx,
p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
printk("val[%02x] sreg[%02x] seqreg[%02x] "
"sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
p->val, p->sreg, p->seqreg,
p->sreg2, p->ireg, p->select_state, p->event);
idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
} while (idx != stop);
}
static void esp_flush_fifo(struct esp *esp)
{
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
if (esp->rev == ESP236) {
int lim = 1000;
while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
"will not clear!\n",
esp->host->unique_id);
break;
}
udelay(1);
}
}
}
static void hme_read_fifo(struct esp *esp)
{
int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
int idx = 0;
while (fcnt--) {
esp->fifo[idx++] = esp_read8(ESP_FDATA);
esp->fifo[idx++] = esp_read8(ESP_FDATA);
}
if (esp->sreg2 & ESP_STAT2_F1BYTE) {
esp_write8(0, ESP_FDATA);
esp->fifo[idx++] = esp_read8(ESP_FDATA);
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
}
esp->fifo_cnt = idx;
}
static void esp_set_all_config3(struct esp *esp, u8 val)
{
int i;
for (i = 0; i < ESP_MAX_TARGET; i++)
esp->target[i].esp_config3 = val;
}
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
u8 family_code, version;
/* Now reset the ESP chip */
scsi_esp_cmd(esp, ESP_CMD_RC);
scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
/* Reload the configuration registers */
esp_write8(esp->cfact, ESP_CFACT);
esp->prev_stp = 0;
esp_write8(esp->prev_stp, ESP_STP);
esp->prev_soff = 0;
esp_write8(esp->prev_soff, ESP_SOFF);
esp_write8(esp->neg_defp, ESP_TIMEO);
/* This is the only point at which it is reliable to read
 * the ID-code for the fast ESP chip variants.
 */
esp->max_period = ((35 * esp->ccycle) / 1000);
if (esp->rev == FAST) {
version = esp_read8(ESP_UID);
family_code = (version & 0xf8) >> 3;
if (family_code == 0x02)
esp->rev = FAS236;
else if (family_code == 0x0a)
esp->rev = FASHME; /* Version is usually '5'. */
else
esp->rev = FAS100A;
esp->min_period = ((4 * esp->ccycle) / 1000);
} else {
esp->min_period = ((5 * esp->ccycle) / 1000);
}
esp->max_period = (esp->max_period + 3)>>2;
esp->min_period = (esp->min_period + 3)>>2;
esp_write8(esp->config1, ESP_CFG1);
switch (esp->rev) {
case ESP100:
/* nothing to do */
break;
case ESP100A:
esp_write8(esp->config2, ESP_CFG2);
break;
case ESP236:
/* Slow 236 */
esp_write8(esp->config2, ESP_CFG2);
esp->prev_cfg3 = esp->target[0].esp_config3;
esp_write8(esp->prev_cfg3, ESP_CFG3);
break;
case FASHME:
esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
/* fallthrough... */
case FAS236:
/* Fast 236 or HME */
esp_write8(esp->config2, ESP_CFG2);
if (esp->rev == FASHME) {
u8 cfg3 = esp->target[0].esp_config3;
cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
if (esp->scsi_id >= 8)
cfg3 |= ESP_CONFIG3_IDBIT3;
esp_set_all_config3(esp, cfg3);
} else {
u32 cfg3 = esp->target[0].esp_config3;
cfg3 |= ESP_CONFIG3_FCLK;
esp_set_all_config3(esp, cfg3);
}
esp->prev_cfg3 = esp->target[0].esp_config3;
esp_write8(esp->prev_cfg3, ESP_CFG3);
if (esp->rev == FASHME) {
esp->radelay = 80;
} else {
if (esp->flags & ESP_FLAG_DIFFERENTIAL)
esp->radelay = 0;
else
esp->radelay = 96;
}
break;
case FAS100A:
/* Fast 100a */
esp_write8(esp->config2, ESP_CFG2);
esp_set_all_config3(esp,
(esp->target[0].esp_config3 |
ESP_CONFIG3_FCLOCK));
esp->prev_cfg3 = esp->target[0].esp_config3;
esp_write8(esp->prev_cfg3, ESP_CFG3);
esp->radelay = 32;
break;
default:
break;
}
/* Eat any bitrot in the chip */
esp_read8(ESP_INTRPT);
udelay(100);
}
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
struct scatterlist *sg = cmd->request_buffer;
int dir = cmd->sc_data_direction;
int total, i;
if (dir == DMA_NONE)
return;
BUG_ON(cmd->use_sg == 0);
spriv->u.num_sg = esp->ops->map_sg(esp, sg,
cmd->use_sg, dir);
spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg;
total = 0;
for (i = 0; i < spriv->u.num_sg; i++)
total += sg_dma_len(&sg[i]);
spriv->tot_residue = total;
}
static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
return ent->sense_dma +
(ent->sense_ptr - cmd->sense_buffer);
}
return sg_dma_address(p->cur_sg) +
(sg_dma_len(p->cur_sg) -
p->cur_residue);
}
static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
return SCSI_SENSE_BUFFERSIZE -
(ent->sense_ptr - cmd->sense_buffer);
}
return p->cur_residue;
}
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
struct scsi_cmnd *cmd, unsigned int len)
{
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
ent->sense_ptr += len;
return;
}
p->cur_residue -= len;
p->tot_residue -= len;
if (p->cur_residue < 0 || p->tot_residue < 0) {
printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
esp->host->unique_id);
printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
"len[%u]\n",
esp->host->unique_id,
p->cur_residue, p->tot_residue, len);
p->cur_residue = 0;
p->tot_residue = 0;
}
if (!p->cur_residue && p->tot_residue) {
p->cur_sg++;
p->cur_residue = sg_dma_len(p->cur_sg);
}
}
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
int dir = cmd->sc_data_direction;
if (dir == DMA_NONE)
return;
esp->ops->unmap_sg(esp, cmd->request_buffer,
spriv->u.num_sg, dir);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_cmnd *cmd = ent->cmd;
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
ent->saved_sense_ptr = ent->sense_ptr;
return;
}
ent->saved_cur_residue = spriv->cur_residue;
ent->saved_cur_sg = spriv->cur_sg;
ent->saved_tot_residue = spriv->tot_residue;
}
static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_cmnd *cmd = ent->cmd;
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
ent->sense_ptr = ent->saved_sense_ptr;
return;
}
spriv->cur_residue = ent->saved_cur_residue;
spriv->cur_sg = ent->saved_cur_sg;
spriv->tot_residue = ent->saved_tot_residue;
}
static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
if (cmd->cmd_len == 6 ||
cmd->cmd_len == 10 ||
cmd->cmd_len == 12) {
esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
} else {
esp->flags |= ESP_FLAG_DOING_SLOWCMD;
}
}
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
if (esp->rev > ESP100A) {
u8 val = esp->target[tgt].esp_config3;
if (val != esp->prev_cfg3) {
esp->prev_cfg3 = val;
esp_write8(val, ESP_CFG3);
}
}
}
static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
u8 off = esp->target[tgt].esp_offset;
u8 per = esp->target[tgt].esp_period;
if (off != esp->prev_soff) {
esp->prev_soff = off;
esp_write8(off, ESP_SOFF);
}
if (per != esp->prev_stp) {
esp->prev_stp = per;
esp_write8(per, ESP_STP);
}
}
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
if (esp->rev == FASHME) {
/* Arbitrary segment boundaries, 24-bit counts. */
if (dma_len > (1U << 24))
dma_len = (1U << 24);
} else {
u32 base, end;
/* ESP chip limits other variants by 16-bits of transfer
* count. Actually on FAS100A and FAS236 we could get
* 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
* in the ESP_CFG2 register but that causes other unwanted
* changes so we don't use it currently.
*/
if (dma_len > (1U << 16))
dma_len = (1U << 16);
/* All of the DMA variants hooked up to these chips
* cannot handle crossing a 24-bit address boundary.
*/
base = dma_addr & ((1U << 24) - 1U);
end = base + dma_len;
if (end > (1U << 24))
end = (1U <<24);
dma_len = end - base;
}
return dma_len;
}
static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
struct scsi_target *target = tp->starget;
return spi_width(target) != tp->nego_goal_width;
}
static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
struct scsi_target *target = tp->starget;
/* When offset is zero, period is "don't care". */
if (!spi_offset(target) && !tp->nego_goal_offset)
return 0;
if (spi_offset(target) == tp->nego_goal_offset &&
spi_period(target) == tp->nego_goal_period)
return 0;
return 1;
}
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
struct esp_lun_data *lp)
{
if (!ent->tag[0]) {
/* Non-tagged, slot already taken? */
if (lp->non_tagged_cmd)
return -EBUSY;
if (lp->hold) {
/* We are being held by active tagged
* commands.
*/
if (lp->num_tagged)
return -EBUSY;
/* Tagged commands completed, we can unplug
* the queue and run this untagged command.
*/
lp->hold = 0;
} else if (lp->num_tagged) {
/* Plug the queue until num_tagged decreases
* to zero in esp_free_lun_tag.
*/
lp->hold = 1;
return -EBUSY;
}
lp->non_tagged_cmd = ent;
return 0;
} else {
/* Tagged command, see if blocked by a
* non-tagged one.
*/
if (lp->non_tagged_cmd || lp->hold)
return -EBUSY;
}
BUG_ON(lp->tagged_cmds[ent->tag[1]]);
lp->tagged_cmds[ent->tag[1]] = ent;
lp->num_tagged++;
return 0;
}
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
struct esp_lun_data *lp)
{
if (ent->tag[0]) {
BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
lp->tagged_cmds[ent->tag[1]] = NULL;
lp->num_tagged--;
} else {
BUG_ON(lp->non_tagged_cmd != ent);
lp->non_tagged_cmd = NULL;
}
}
/* When a contingent allegiance condition is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer
* to send out the REQUEST_SENSE automatically, but this was difficult
* to get right especially in the presence of applications like smartd
* which use SG_IO to send out their own REQUEST_SENSE commands.
*/
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_cmnd *cmd = ent->cmd;
struct scsi_device *dev = cmd->device;
int tgt, lun;
u8 *p, val;
tgt = dev->id;
lun = dev->lun;
if (!ent->sense_ptr) {
esp_log_autosense("esp%d: Doing auto-sense for "
"tgt[%d] lun[%d]\n",
esp->host->unique_id, tgt, lun);
ent->sense_ptr = cmd->sense_buffer;
ent->sense_dma = esp->ops->map_single(esp,
ent->sense_ptr,
SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
}
ent->saved_sense_ptr = ent->sense_ptr;
esp->active_cmd = ent;
p = esp->command_block;
esp->msg_out_len = 0;
*p++ = IDENTIFY(0, lun);
*p++ = REQUEST_SENSE;
*p++ = ((dev->scsi_level <= SCSI_2) ?
(lun << 5) : 0);
*p++ = 0;
*p++ = 0;
*p++ = SCSI_SENSE_BUFFERSIZE;
*p++ = 0;
esp->select_state = ESP_SELECT_BASIC;
val = tgt;
if (esp->rev == FASHME)
val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
esp_write8(val, ESP_BUSID);
esp_write_tgt_sync(esp, tgt);
esp_write_tgt_config3(esp, tgt);
val = (p - esp->command_block);
if (esp->rev == FASHME)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
esp->ops->send_dma_cmd(esp, esp->command_block_dma,
val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
struct esp_cmd_entry *ent;
list_for_each_entry(ent, &esp->queued_cmds, list) {
struct scsi_cmnd *cmd = ent->cmd;
struct scsi_device *dev = cmd->device;
struct esp_lun_data *lp = dev->hostdata;
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
ent->tag[0] = 0;
ent->tag[1] = 0;
return ent;
}
if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
ent->tag[0] = 0;
ent->tag[1] = 0;
}
if (esp_alloc_lun_tag(ent, lp) < 0)
continue;
return ent;
}
return NULL;
}
static void esp_maybe_execute_command(struct esp *esp)
{
struct esp_target_data *tp;
struct esp_lun_data *lp;
struct scsi_device *dev;
struct scsi_cmnd *cmd;
struct esp_cmd_entry *ent;
int tgt, lun, i;
u32 val, start_cmd;
u8 *p;
if (esp->active_cmd ||
(esp->flags & ESP_FLAG_RESETTING))
return;
ent = find_and_prep_issuable_command(esp);
if (!ent)
return;
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
esp_autosense(esp, ent);
return;
}
cmd = ent->cmd;
dev = cmd->device;
tgt = dev->id;
lun = dev->lun;
tp = &esp->target[tgt];
lp = dev->hostdata;
list_del(&ent->list);
list_add(&ent->list, &esp->active_cmds);
esp->active_cmd = ent;
esp_map_dma(esp, cmd);
esp_save_pointers(esp, ent);
esp_check_command_len(esp, cmd);
p = esp->command_block;
esp->msg_out_len = 0;
if (tp->flags & ESP_TGT_CHECK_NEGO) {
/* Need to negotiate. If the target is broken
* go for synchronous transfers and non-wide.
*/
if (tp->flags & ESP_TGT_BROKEN) {
tp->flags &= ~ESP_TGT_DISCONNECT;
tp->nego_goal_period = 0;
tp->nego_goal_offset = 0;
tp->nego_goal_width = 0;
tp->nego_goal_tags = 0;
}
/* If the settings are not changing, skip this. */
if (spi_width(tp->starget) == tp->nego_goal_width &&
spi_period(tp->starget) == tp->nego_goal_period &&
spi_offset(tp->starget) == tp->nego_goal_offset) {
tp->flags &= ~ESP_TGT_CHECK_NEGO;
goto build_identify;
}
if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
esp->msg_out_len =
spi_populate_width_msg(&esp->msg_out[0],
(tp->nego_goal_width ?
1 : 0));
tp->flags |= ESP_TGT_NEGO_WIDE;
} else if (esp_need_to_nego_sync(tp)) {
esp->msg_out_len =
spi_populate_sync_msg(&esp->msg_out[0],
tp->nego_goal_period,
tp->nego_goal_offset);
tp->flags |= ESP_TGT_NEGO_SYNC;
} else {
tp->flags &= ~ESP_TGT_CHECK_NEGO;
}
/* Process it like a slow command. */
if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
esp->flags |= ESP_FLAG_DOING_SLOWCMD;
}
build_identify:
/* If we don't have a lun-data struct yet, we're probing
* so do not disconnect. Also, do not disconnect unless
* we have a tag on this command.
*/
if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
*p++ = IDENTIFY(1, lun);
else
*p++ = IDENTIFY(0, lun);
if (ent->tag[0] && esp->rev == ESP100) {
/* ESP100 lacks select w/atn3 command, use select
* and stop instead.
*/
esp->flags |= ESP_FLAG_DOING_SLOWCMD;
}
if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
if (ent->tag[0]) {
*p++ = ent->tag[0];
*p++ = ent->tag[1];
start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
}
for (i = 0; i < cmd->cmd_len; i++)
*p++ = cmd->cmnd[i];
esp->select_state = ESP_SELECT_BASIC;
} else {
esp->cmd_bytes_left = cmd->cmd_len;
esp->cmd_bytes_ptr = &cmd->cmnd[0];
if (ent->tag[0]) {
for (i = esp->msg_out_len - 1;
i >= 0; i--)
esp->msg_out[i + 2] = esp->msg_out[i];
esp->msg_out[0] = ent->tag[0];
esp->msg_out[1] = ent->tag[1];
esp->msg_out_len += 2;
}
start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
esp->select_state = ESP_SELECT_MSGOUT;
}
val = tgt;
if (esp->rev == FASHME)
val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
esp_write8(val, ESP_BUSID);
esp_write_tgt_sync(esp, tgt);
esp_write_tgt_config3(esp, tgt);
val = (p - esp->command_block);
if (esp_debug & ESP_DEBUG_SCSICMD) {
printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
for (i = 0; i < cmd->cmd_len; i++)
printk("%02x ", cmd->cmnd[i]);
printk("]\n");
}
if (esp->rev == FASHME)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
esp->ops->send_dma_cmd(esp, esp->command_block_dma,
val, 16, 0, start_cmd);
}
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
struct list_head *head = &esp->esp_cmd_pool;
struct esp_cmd_entry *ret;
if (list_empty(head)) {
ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
} else {
ret = list_entry(head->next, struct esp_cmd_entry, list);
list_del(&ret->list);
memset(ret, 0, sizeof(*ret));
}
return ret;
}
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
list_add(&ent->list, &esp->esp_cmd_pool);
}
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
struct scsi_cmnd *cmd, unsigned int result)
{
struct scsi_device *dev = cmd->device;
int tgt = dev->id;
int lun = dev->lun;
esp->active_cmd = NULL;
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, dev->hostdata);
cmd->result = result;
if (ent->eh_done) {
complete(ent->eh_done);
ent->eh_done = NULL;
}
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
esp->ops->unmap_single(esp, ent->sense_dma,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
/* Restore the message/status bytes to what we actually
* saw originally. Also, report that we are providing
* the sense data.
*/
cmd->result = ((DRIVER_SENSE << 24) |
(DID_OK << 16) |
(COMMAND_COMPLETE << 8) |
(SAM_STAT_CHECK_CONDITION << 0));
ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
if (esp_debug & ESP_DEBUG_AUTOSENSE) {
int i;
printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
esp->host->unique_id, tgt, lun);
for (i = 0; i < 18; i++)
printk("%02x ", cmd->sense_buffer[i]);
printk("]\n");
}
}
cmd->scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
esp_maybe_execute_command(esp);
}
static unsigned int compose_result(unsigned int status, unsigned int message,
unsigned int driver_code)
{
return (status | (message << 8) | (driver_code << 16));
}
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_device *dev = ent->cmd->device;
struct esp_lun_data *lp = dev->hostdata;
scsi_track_queue_full(dev, lp->num_tagged - 1);
}
static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct scsi_device *dev = cmd->device;
struct esp *esp = host_to_esp(dev->host);
struct esp_cmd_priv *spriv;
struct esp_cmd_entry *ent;
ent = esp_get_ent(esp);
if (!ent)
return SCSI_MLQUEUE_HOST_BUSY;
ent->cmd = cmd;
cmd->scsi_done = done;
spriv = ESP_CMD_PRIV(cmd);
spriv->u.dma_addr = ~(dma_addr_t)0x0;
list_add_tail(&ent->list, &esp->queued_cmds);
esp_maybe_execute_command(esp);
return 0;
}
static int esp_check_gross_error(struct esp *esp)
{
if (esp->sreg & ESP_STAT_SPAM) {
/* Gross Error, could be one of:
* - top of fifo overwritten
* - top of command register overwritten
* - DMA programmed with wrong direction
* - improper phase change
*/
printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
esp->host->unique_id, esp->sreg);
/* XXX Reset the chip. XXX */
return 1;
}
return 0;
}
static int esp_check_spur_intr(struct esp *esp)
{
switch (esp->rev) {
case ESP100:
case ESP100A:
/* The interrupt pending bit of the status register cannot
* be trusted on these revisions.
*/
esp->sreg &= ~ESP_STAT_INTR;
break;
default:
if (!(esp->sreg & ESP_STAT_INTR)) {
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & ESP_INTR_SR)
return 1;
/* If the DMA is indicating interrupt pending and the
* ESP is not, the only possibility is a DMA error.
*/
if (!esp->ops->dma_error(esp)) {
printk(KERN_ERR PFX "esp%d: Spurious irq, "
"sreg=%x.\n",
esp->host->unique_id, esp->sreg);
return -1;
}
printk(KERN_ERR PFX "esp%d: DMA error\n",
esp->host->unique_id);
/* XXX Reset the chip. XXX */
return -1;
}
break;
}
return 0;
}
static void esp_schedule_reset(struct esp *esp)
{
esp_log_reset("ESP: esp_schedule_reset() from %p\n",
__builtin_return_address(0));
esp->flags |= ESP_FLAG_RESETTING;
esp_event(esp, ESP_EVENT_RESET);
}
/* In order to avoid having to add a special half-reconnected state
* into the driver we just sit here and poll through the rest of
* the reselection process to get the tag message bytes.
*/
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
struct esp_lun_data *lp)
{
struct esp_cmd_entry *ent;
int i;
if (!lp->num_tagged) {
printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
esp->host->unique_id);
return NULL;
}
esp_log_reconnect("ESP: reconnect tag, ");
for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
if (esp->ops->irq_pending(esp))
break;
}
if (i == ESP_QUICKIRQ_LIMIT) {
printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
esp->host->unique_id);
return NULL;
}
esp->sreg = esp_read8(ESP_STATUS);
esp->ireg = esp_read8(ESP_INTRPT);
esp_log_reconnect("IRQ(%d:%x:%x), ",
i, esp->ireg, esp->sreg);
if (esp->ireg & ESP_INTR_DC) {
printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
esp->host->unique_id);
return NULL;
}
if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
esp->host->unique_id, esp->sreg);
return NULL;
}
/* DMA in the tag bytes... */
esp->command_block[0] = 0xff;
esp->command_block[1] = 0xff;
esp->ops->send_dma_cmd(esp, esp->command_block_dma,
2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
/* ACK the message. */
scsi_esp_cmd(esp, ESP_CMD_MOK);
for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
if (esp->ops->irq_pending(esp)) {
esp->sreg = esp_read8(ESP_STATUS);
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & ESP_INTR_FDONE)
break;
}
udelay(1);
}
if (i == ESP_RESELECT_TAG_LIMIT) {
printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
esp->host->unique_id);
return NULL;
}
esp->ops->dma_drain(esp);
esp->ops->dma_invalidate(esp);
esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
i, esp->ireg, esp->sreg,
esp->command_block[0],
esp->command_block[1]);
if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
esp->command_block[0] > ORDERED_QUEUE_TAG) {
printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
"type %02x.\n",
esp->host->unique_id, esp->command_block[0]);
return NULL;
}
ent = lp->tagged_cmds[esp->command_block[1]];
if (!ent) {
printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
"tag %02x.\n",
esp->host->unique_id, esp->command_block[1]);
return NULL;
}
return ent;
}
static int esp_reconnect(struct esp *esp)
{
struct esp_cmd_entry *ent;
struct esp_target_data *tp;
struct esp_lun_data *lp;
struct scsi_device *dev;
int target, lun;
BUG_ON(esp->active_cmd);
if (esp->rev == FASHME) {
/* FASHME puts the target and lun numbers directly
* into the fifo.
*/
target = esp->fifo[0];
lun = esp->fifo[1] & 0x7;
} else {
u8 bits = esp_read8(ESP_FDATA);
/* Older chips put the lun directly into the fifo, but
* the target is given as a sample of the arbitration
* lines on the bus at reselection time. So we should
* see the ID of the ESP and the one reconnecting target
* set in the bitmap.
*/
if (!(bits & esp->scsi_id_mask))
goto do_reset;
bits &= ~esp->scsi_id_mask;
if (!bits || (bits & (bits - 1)))
goto do_reset;
target = ffs(bits) - 1;
lun = (esp_read8(ESP_FDATA) & 0x7);
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
if (esp->rev == ESP100) {
u8 ireg = esp_read8(ESP_INTRPT);
/* This chip has a bug during reselection that can
* cause a spurious illegal-command interrupt, which
* we simply ACK here. Another possibility is a bus
* reset so we must check for that.
*/
if (ireg & ESP_INTR_SR)
goto do_reset;
}
scsi_esp_cmd(esp, ESP_CMD_NULL);
}
esp_write_tgt_sync(esp, target);
esp_write_tgt_config3(esp, target);
scsi_esp_cmd(esp, ESP_CMD_MOK);
if (esp->rev == FASHME)
esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
ESP_BUSID);
tp = &esp->target[target];
dev = __scsi_device_lookup_by_target(tp->starget, lun);
if (!dev) {
printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
"tgt[%u] lun[%u]\n",
esp->host->unique_id, target, lun);
goto do_reset;
}
lp = dev->hostdata;
ent = lp->non_tagged_cmd;
if (!ent) {
ent = esp_reconnect_with_tag(esp, lp);
if (!ent)
goto do_reset;
}
esp->active_cmd = ent;
if (ent->flags & ESP_CMD_FLAG_ABORT) {
esp->msg_out[0] = ABORT_TASK_SET;
esp->msg_out_len = 1;
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
esp_event(esp, ESP_EVENT_CHECK_PHASE);
esp_restore_pointers(esp, ent);
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
return 1;
do_reset:
esp_schedule_reset(esp);
return 0;
}
static int esp_finish_select(struct esp *esp)
{
struct esp_cmd_entry *ent;
struct scsi_cmnd *cmd;
u8 orig_select_state;
orig_select_state = esp->select_state;
/* No longer selecting. */
esp->select_state = ESP_SELECT_NONE;
esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
ent = esp->active_cmd;
cmd = ent->cmd;
if (esp->ops->dma_error(esp)) {
/* If we see a DMA error during or as a result of selection,
* all bets are off.
*/
esp_schedule_reset(esp);
esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
return 0;
}
esp->ops->dma_invalidate(esp);
if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
struct esp_target_data *tp = &esp->target[cmd->device->id];
/* Carefully back out of the selection attempt. Release
* resources (such as DMA mapping & TAG) and reset state (such
* as message out and command delivery variables).
*/
if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, cmd->device->hostdata);
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
esp->cmd_bytes_ptr = NULL;
esp->cmd_bytes_left = 0;
} else {
esp->ops->unmap_single(esp, ent->sense_dma,
SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
}
/* Now that the state is unwound properly, put back onto
* the issue queue. This command is no longer active.
*/
list_del(&ent->list);
list_add(&ent->list, &esp->queued_cmds);
esp->active_cmd = NULL;
/* Return value ignored by caller, it directly invokes
* esp_reconnect().
*/
return 0;
}
if (esp->ireg == ESP_INTR_DC) {
struct scsi_device *dev = cmd->device;
/* Disconnect. Make sure we re-negotiate sync and
* wide parameters if this target starts responding
* again in the future.
*/
esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
scsi_esp_cmd(esp, ESP_CMD_ESEL);
esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
return 1;
}
if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
/* Selection successful. On pre-FAST chips we have
* to do a NOP and possibly clean out the FIFO.
*/
if (esp->rev <= ESP236) {
int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
scsi_esp_cmd(esp, ESP_CMD_NULL);
if (!fcnt &&
(!esp->prev_soff ||
((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
esp_flush_fifo(esp);
}
/* If we are doing a slow command, negotiation, etc.
* we'll do the right thing as we transition to the
* next phase.
*/
esp_event(esp, ESP_EVENT_CHECK_PHASE);
return 0;
}
printk("ESP: Unexpected selection completion ireg[%x].\n",
esp->ireg);
esp_schedule_reset(esp);
return 0;
}
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
struct scsi_cmnd *cmd)
{
int fifo_cnt, ecount, bytes_sent, flush_fifo;
fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
fifo_cnt <<= 1;
ecount = 0;
if (!(esp->sreg & ESP_STAT_TCNT)) {
ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
(((unsigned int)esp_read8(ESP_TCMED)) << 8));
if (esp->rev == FASHME)
ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
}
bytes_sent = esp->data_dma_len;
bytes_sent -= ecount;
if (!(ent->flags & ESP_CMD_FLAG_WRITE))
bytes_sent -= fifo_cnt;
flush_fifo = 0;
if (!esp->prev_soff) {
/* Asynchronous transfer (zero sync offset), always flush fifo. */
flush_fifo = 1;
} else {
if (esp->rev == ESP100) {
u32 fflags, phase;
/* ESP100 has a chip bug where in the synchronous data
* phase it can mistake a final long REQ pulse from the
* target as an extra data byte. Fun.
*
* To detect this case we resample the status register
* and fifo flags. If we're still in a data phase and
* we see spurious chunks in the fifo, we return error
* to the caller which should reset and set things up
* such that we only try future transfers to this
* target in synchronous mode.
*/
esp->sreg = esp_read8(ESP_STATUS);
phase = esp->sreg & ESP_STAT_PMASK;
fflags = esp_read8(ESP_FFLAGS);
if ((phase == ESP_DOP &&
(fflags & ESP_FF_ONOTZERO)) ||
(phase == ESP_DIP &&
(fflags & ESP_FF_FBYTES)))
return -1;
}
if (!(ent->flags & ESP_CMD_FLAG_WRITE))
flush_fifo = 1;
}
if (flush_fifo)
esp_flush_fifo(esp);
return bytes_sent;
}
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
u8 scsi_period, u8 scsi_offset,
u8 esp_stp, u8 esp_soff)
{
spi_period(tp->starget) = scsi_period;
spi_offset(tp->starget) = scsi_offset;
spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
if (esp_soff) {
esp_stp &= 0x1f;
esp_soff |= esp->radelay;
if (esp->rev >= FAS236) {
u8 bit = ESP_CONFIG3_FSCSI;
if (esp->rev >= FAS100A)
bit = ESP_CONFIG3_FAST;
if (scsi_period < 50) {
if (esp->rev == FASHME)
esp_soff &= ~esp->radelay;
tp->esp_config3 |= bit;
} else {
tp->esp_config3 &= ~bit;
}
esp->prev_cfg3 = tp->esp_config3;
esp_write8(esp->prev_cfg3, ESP_CFG3);
}
}
tp->esp_period = esp->prev_stp = esp_stp;
tp->esp_offset = esp->prev_soff = esp_soff;
esp_write8(esp_soff, ESP_SOFF);
esp_write8(esp_stp, ESP_STP);
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
spi_display_xfer_agreement(tp->starget);
}
static void esp_msgin_reject(struct esp *esp)
{
struct esp_cmd_entry *ent = esp->active_cmd;
struct scsi_cmnd *cmd = ent->cmd;
struct esp_target_data *tp;
int tgt;
tgt = cmd->device->id;
tp = &esp->target[tgt];
if (tp->flags & ESP_TGT_NEGO_WIDE) {
tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
if (!esp_need_to_nego_sync(tp)) {
tp->flags &= ~ESP_TGT_CHECK_NEGO;
scsi_esp_cmd(esp, ESP_CMD_RATN);
} else {
esp->msg_out_len =
spi_populate_sync_msg(&esp->msg_out[0],
tp->nego_goal_period,
tp->nego_goal_offset);
tp->flags |= ESP_TGT_NEGO_SYNC;
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
return;
}
if (tp->flags & ESP_TGT_NEGO_SYNC) {
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
tp->esp_period = 0;
tp->esp_offset = 0;
esp_setsync(esp, tp, 0, 0, 0, 0);
scsi_esp_cmd(esp, ESP_CMD_RATN);
return;
}
esp->msg_out[0] = ABORT_TASK_SET;
esp->msg_out_len = 1;
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
u8 period = esp->msg_in[3];
u8 offset = esp->msg_in[4];
u8 stp;
if (!(tp->flags & ESP_TGT_NEGO_SYNC))
goto do_reject;
if (offset > 15)
goto do_reject;
if (offset) {
int rounded_up, one_clock;
if (period > esp->max_period) {
period = offset = 0;
goto do_sdtr;
}
if (period < esp->min_period)
goto do_reject;
one_clock = esp->ccycle / 1000;
rounded_up = (period << 2);
rounded_up = (rounded_up + one_clock - 1) / one_clock;
stp = rounded_up;
if (stp && esp->rev >= FAS236) {
if (stp >= 50)
stp--;
}
} else {
stp = 0;
}
esp_setsync(esp, tp, period, offset, stp, offset);
return;
do_reject:
esp->msg_out[0] = MESSAGE_REJECT;
esp->msg_out_len = 1;
scsi_esp_cmd(esp, ESP_CMD_SATN);
return;
do_sdtr:
tp->nego_goal_period = period;
tp->nego_goal_offset = offset;
esp->msg_out_len =
spi_populate_sync_msg(&esp->msg_out[0],
tp->nego_goal_period,
tp->nego_goal_offset);
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
int size = 8 << esp->msg_in[3];
u8 cfg3;
if (esp->rev != FASHME)
goto do_reject;
if (size != 8 && size != 16)
goto do_reject;
if (!(tp->flags & ESP_TGT_NEGO_WIDE))
goto do_reject;
cfg3 = tp->esp_config3;
if (size == 16) {
tp->flags |= ESP_TGT_WIDE;
cfg3 |= ESP_CONFIG3_EWIDE;
} else {
tp->flags &= ~ESP_TGT_WIDE;
cfg3 &= ~ESP_CONFIG3_EWIDE;
}
tp->esp_config3 = cfg3;
esp->prev_cfg3 = cfg3;
esp_write8(cfg3, ESP_CFG3);
tp->flags &= ~ESP_TGT_NEGO_WIDE;
spi_period(tp->starget) = 0;
spi_offset(tp->starget) = 0;
if (!esp_need_to_nego_sync(tp)) {
tp->flags &= ~ESP_TGT_CHECK_NEGO;
scsi_esp_cmd(esp, ESP_CMD_RATN);
} else {
esp->msg_out_len =
spi_populate_sync_msg(&esp->msg_out[0],
tp->nego_goal_period,
tp->nego_goal_offset);
tp->flags |= ESP_TGT_NEGO_SYNC;
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
return;
do_reject:
esp->msg_out[0] = MESSAGE_REJECT;
esp->msg_out_len = 1;
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
static void esp_msgin_extended(struct esp *esp)
{
struct esp_cmd_entry *ent = esp->active_cmd;
struct scsi_cmnd *cmd = ent->cmd;
struct esp_target_data *tp;
int tgt = cmd->device->id;
tp = &esp->target[tgt];
if (esp->msg_in[2] == EXTENDED_SDTR) {
esp_msgin_sdtr(esp, tp);
return;
}
if (esp->msg_in[2] == EXTENDED_WDTR) {
esp_msgin_wdtr(esp, tp);
return;
}
printk("ESP: Unexpected extended msg type %x\n",
esp->msg_in[2]);
esp->msg_out[0] = ABORT_TASK_SET;
esp->msg_out_len = 1;
scsi_esp_cmd(esp, ESP_CMD_SATN);
}
/* Analyze msgin bytes received from target so far. Return non-zero
* if there are more bytes needed to complete the message.
*/
static int esp_msgin_process(struct esp *esp)
{
u8 msg0 = esp->msg_in[0];
int len = esp->msg_in_len;
if (msg0 & 0x80) {
/* Identify */
printk("ESP: Unexpected msgin identify\n");
return 0;
}
switch (msg0) {
case EXTENDED_MESSAGE:
if (len == 1)
return 1;
if (len < esp->msg_in[1] + 2)
return 1;
esp_msgin_extended(esp);
return 0;
case IGNORE_WIDE_RESIDUE: {
struct esp_cmd_entry *ent;
struct esp_cmd_priv *spriv;
if (len == 1)
return 1;
if (esp->msg_in[1] != 1)
goto do_reject;
ent = esp->active_cmd;
spriv = ESP_CMD_PRIV(ent->cmd);
if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
spriv->cur_sg--;
spriv->cur_residue = 1;
} else
spriv->cur_residue++;
spriv->tot_residue++;
return 0;
}
case NOP:
return 0;
case RESTORE_POINTERS:
esp_restore_pointers(esp, esp->active_cmd);
return 0;
case SAVE_POINTERS:
esp_save_pointers(esp, esp->active_cmd);
return 0;
case COMMAND_COMPLETE:
case DISCONNECT: {
struct esp_cmd_entry *ent = esp->active_cmd;
ent->message = msg0;
esp_event(esp, ESP_EVENT_FREE_BUS);
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
return 0;
}
case MESSAGE_REJECT:
esp_msgin_reject(esp);
return 0;
default:
do_reject:
esp->msg_out[0] = MESSAGE_REJECT;
esp->msg_out_len = 1;
scsi_esp_cmd(esp, ESP_CMD_SATN);
return 0;
}
}
static int esp_process_event(struct esp *esp)
{
int write;
again:
write = 0;
switch (esp->event) {
case ESP_EVENT_CHECK_PHASE:
switch (esp->sreg & ESP_STAT_PMASK) {
case ESP_DOP:
esp_event(esp, ESP_EVENT_DATA_OUT);
break;
case ESP_DIP:
esp_event(esp, ESP_EVENT_DATA_IN);
break;
case ESP_STATP:
esp_flush_fifo(esp);
scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
esp_event(esp, ESP_EVENT_STATUS);
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
return 1;
case ESP_MOP:
esp_event(esp, ESP_EVENT_MSGOUT);
break;
case ESP_MIP:
esp_event(esp, ESP_EVENT_MSGIN);
break;
case ESP_CMDP:
esp_event(esp, ESP_EVENT_CMD_START);
break;
default:
printk("ESP: Unexpected phase, sreg=%02x\n",
esp->sreg);
esp_schedule_reset(esp);
return 0;
}
goto again;
break;
case ESP_EVENT_DATA_IN:
write = 1;
/* fallthru */
case ESP_EVENT_DATA_OUT: {
struct esp_cmd_entry *ent = esp->active_cmd;
struct scsi_cmnd *cmd = ent->cmd;
dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
unsigned int dma_len = esp_cur_dma_len(ent, cmd);
if (esp->rev == ESP100)
scsi_esp_cmd(esp, ESP_CMD_NULL);
if (write)
ent->flags |= ESP_CMD_FLAG_WRITE;
else
ent->flags &= ~ESP_CMD_FLAG_WRITE;
dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
esp->data_dma_len = dma_len;
if (!dma_len) {
printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
esp->host->unique_id);
printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n",
esp->host->unique_id,
esp_cur_dma_addr(ent, cmd),
esp_cur_dma_len(ent, cmd));
esp_schedule_reset(esp);
return 0;
}
esp_log_datastart("ESP: start data addr[%08x] len[%u] "
"write(%d)\n",
dma_addr, dma_len, write);
esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
write, ESP_CMD_DMA | ESP_CMD_TI);
esp_event(esp, ESP_EVENT_DATA_DONE);
break;
}
case ESP_EVENT_DATA_DONE: {
struct esp_cmd_entry *ent = esp->active_cmd;
struct scsi_cmnd *cmd = ent->cmd;
int bytes_sent;
if (esp->ops->dma_error(esp)) {
printk("ESP: data done, DMA error, resetting\n");
esp_schedule_reset(esp);
return 0;
}
if (ent->flags & ESP_CMD_FLAG_WRITE) {
/* XXX parity errors, etc. XXX */
esp->ops->dma_drain(esp);
}
esp->ops->dma_invalidate(esp);
if (esp->ireg != ESP_INTR_BSERV) {
/* We should always see exactly a bus-service
* interrupt at the end of a successful transfer.
*/
printk("ESP: data done, not BSERV, resetting\n");
esp_schedule_reset(esp);
return 0;
}
bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
ent->flags, bytes_sent);
if (bytes_sent < 0) {
/* XXX force sync mode for this target XXX */
esp_schedule_reset(esp);
return 0;
}
esp_advance_dma(esp, ent, cmd, bytes_sent);
esp_event(esp, ESP_EVENT_CHECK_PHASE);
goto again;
break;
}
case ESP_EVENT_STATUS: {
struct esp_cmd_entry *ent = esp->active_cmd;
if (esp->ireg & ESP_INTR_FDONE) {
ent->status = esp_read8(ESP_FDATA);
ent->message = esp_read8(ESP_FDATA);
scsi_esp_cmd(esp, ESP_CMD_MOK);
} else if (esp->ireg == ESP_INTR_BSERV) {
ent->status = esp_read8(ESP_FDATA);
ent->message = 0xff;
esp_event(esp, ESP_EVENT_MSGIN);
return 0;
}
if (ent->message != COMMAND_COMPLETE) {
printk("ESP: Unexpected message %x in status\n",
ent->message);
esp_schedule_reset(esp);
return 0;
}
esp_event(esp, ESP_EVENT_FREE_BUS);
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
break;
}
case ESP_EVENT_FREE_BUS: {
struct esp_cmd_entry *ent = esp->active_cmd;
struct scsi_cmnd *cmd = ent->cmd;
if (ent->message == COMMAND_COMPLETE ||
ent->message == DISCONNECT)
scsi_esp_cmd(esp, ESP_CMD_ESEL);
if (ent->message == COMMAND_COMPLETE) {
esp_log_cmddone("ESP: Command done status[%x] "
"message[%x]\n",
ent->status, ent->message);
if (ent->status == SAM_STAT_TASK_SET_FULL)
esp_event_queue_full(esp, ent);
if (ent->status == SAM_STAT_CHECK_CONDITION &&
!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
esp_autosense(esp, ent);
} else {
esp_cmd_is_done(esp, ent, cmd,
compose_result(ent->status,
ent->message,
DID_OK));
}
} else if (ent->message == DISCONNECT) {
esp_log_disconnect("ESP: Disconnecting tgt[%d] "
"tag[%x:%x]\n",
cmd->device->id,
ent->tag[0], ent->tag[1]);
esp->active_cmd = NULL;
esp_maybe_execute_command(esp);
} else {
printk("ESP: Unexpected message %x in freebus\n",
ent->message);
esp_schedule_reset(esp);
return 0;
}
if (esp->active_cmd)
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
break;
}
case ESP_EVENT_MSGOUT: {
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
if (esp_debug & ESP_DEBUG_MSGOUT) {
int i;
printk("ESP: Sending message [ ");
for (i = 0; i < esp->msg_out_len; i++)
printk("%02x ", esp->msg_out[i]);
printk("]\n");
}
if (esp->rev == FASHME) {
int i;
/* Always use the fifo. */
for (i = 0; i < esp->msg_out_len; i++) {
esp_write8(esp->msg_out[i], ESP_FDATA);
esp_write8(0, ESP_FDATA);
}
scsi_esp_cmd(esp, ESP_CMD_TI);
} else {
if (esp->msg_out_len == 1) {
esp_write8(esp->msg_out[0], ESP_FDATA);
scsi_esp_cmd(esp, ESP_CMD_TI);
} else {
/* Use DMA. */
memcpy(esp->command_block,
esp->msg_out,
esp->msg_out_len);
esp->ops->send_dma_cmd(esp,
esp->command_block_dma,
esp->msg_out_len,
esp->msg_out_len,
0,
ESP_CMD_DMA|ESP_CMD_TI);
}
}
esp_event(esp, ESP_EVENT_MSGOUT_DONE);
break;
}
case ESP_EVENT_MSGOUT_DONE:
if (esp->rev == FASHME) {
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
} else {
if (esp->msg_out_len > 1)
esp->ops->dma_invalidate(esp);
}
if (!(esp->ireg & ESP_INTR_DC)) {
if (esp->rev != FASHME)
scsi_esp_cmd(esp, ESP_CMD_NULL);
}
esp_event(esp, ESP_EVENT_CHECK_PHASE);
goto again;
case ESP_EVENT_MSGIN:
if (esp->ireg & ESP_INTR_BSERV) {
if (esp->rev == FASHME) {
if (!(esp_read8(ESP_STATUS2) &
ESP_STAT2_FEMPTY))
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
} else {
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
if (esp->rev == ESP100)
scsi_esp_cmd(esp, ESP_CMD_NULL);
}
scsi_esp_cmd(esp, ESP_CMD_TI);
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
return 1;
}
if (esp->ireg & ESP_INTR_FDONE) {
u8 val;
if (esp->rev == FASHME)
val = esp->fifo[0];
else
val = esp_read8(ESP_FDATA);
esp->msg_in[esp->msg_in_len++] = val;
esp_log_msgin("ESP: Got msgin byte %x\n", val);
if (!esp_msgin_process(esp))
esp->msg_in_len = 0;
if (esp->rev == FASHME)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
scsi_esp_cmd(esp, ESP_CMD_MOK);
if (esp->event != ESP_EVENT_FREE_BUS)
esp_event(esp, ESP_EVENT_CHECK_PHASE);
} else {
printk("ESP: MSGIN neither BSERV not FDON, resetting");
esp_schedule_reset(esp);
return 0;
}
break;
case ESP_EVENT_CMD_START:
memcpy(esp->command_block, esp->cmd_bytes_ptr,
esp->cmd_bytes_left);
if (esp->rev == FASHME)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
esp->ops->send_dma_cmd(esp, esp->command_block_dma,
esp->cmd_bytes_left, 16, 0,
ESP_CMD_DMA | ESP_CMD_TI);
esp_event(esp, ESP_EVENT_CMD_DONE);
esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
break;
case ESP_EVENT_CMD_DONE:
esp->ops->dma_invalidate(esp);
if (esp->ireg & ESP_INTR_BSERV) {
esp_event(esp, ESP_EVENT_CHECK_PHASE);
goto again;
}
esp_schedule_reset(esp);
return 0;
break;
case ESP_EVENT_RESET:
scsi_esp_cmd(esp, ESP_CMD_RS);
break;
default:
printk("ESP: Unexpected event %x, resetting\n",
esp->event);
esp_schedule_reset(esp);
return 0;
break;
}
return 1;
}
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_cmnd *cmd = ent->cmd;
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, cmd->device->hostdata);
cmd->result = DID_RESET << 16;
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
esp->ops->unmap_single(esp, ent->sense_dma,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
}
cmd->scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
}
static void esp_clear_hold(struct scsi_device *dev, void *data)
{
struct esp_lun_data *lp = dev->hostdata;
BUG_ON(lp->num_tagged);
lp->hold = 0;
}
static void esp_reset_cleanup(struct esp *esp)
{
struct esp_cmd_entry *ent, *tmp;
int i;
list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
struct scsi_cmnd *cmd = ent->cmd;
list_del(&ent->list);
cmd->result = DID_RESET << 16;
cmd->scsi_done(cmd);
esp_put_ent(esp, ent);
}
list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
if (ent == esp->active_cmd)
esp->active_cmd = NULL;
esp_reset_cleanup_one(esp, ent);
}
BUG_ON(esp->active_cmd != NULL);
/* Force renegotiation of sync/wide transfers. */
for (i = 0; i < ESP_MAX_TARGET; i++) {
struct esp_target_data *tp = &esp->target[i];
tp->esp_period = 0;
tp->esp_offset = 0;
tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
ESP_CONFIG3_FSCSI |
ESP_CONFIG3_FAST);
tp->flags &= ~ESP_TGT_WIDE;
tp->flags |= ESP_TGT_CHECK_NEGO;
if (tp->starget)
starget_for_each_device(tp->starget, NULL,
esp_clear_hold);
}
}
/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
int finish_reset, intr_done;
u8 phase;
esp->sreg = esp_read8(ESP_STATUS);
if (esp->flags & ESP_FLAG_RESETTING) {
finish_reset = 1;
} else {
if (esp_check_gross_error(esp))
return;
finish_reset = esp_check_spur_intr(esp);
if (finish_reset < 0)
return;
}
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & ESP_INTR_SR)
finish_reset = 1;
if (finish_reset) {
esp_reset_cleanup(esp);
if (esp->eh_reset) {
complete(esp->eh_reset);
esp->eh_reset = NULL;
}
return;
}
phase = (esp->sreg & ESP_STAT_PMASK);
if (esp->rev == FASHME) {
if (((phase != ESP_DIP && phase != ESP_DOP) &&
esp->select_state == ESP_SELECT_NONE &&
esp->event != ESP_EVENT_STATUS &&
esp->event != ESP_EVENT_DATA_DONE) ||
(esp->ireg & ESP_INTR_RSEL)) {
esp->sreg2 = esp_read8(ESP_STATUS2);
if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
(esp->sreg2 & ESP_STAT2_F1BYTE))
hme_read_fifo(esp);
}
}
esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
"sreg2[%02x] ireg[%02x]\n",
esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
intr_done = 0;
if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
printk("ESP: unexpected IREG %02x\n", esp->ireg);
if (esp->ireg & ESP_INTR_IC)
esp_dump_cmd_log(esp);
esp_schedule_reset(esp);
} else {
if (!(esp->ireg & ESP_INTR_RSEL)) {
/* Some combination of FDONE, BSERV, DC. */
if (esp->select_state != ESP_SELECT_NONE)
intr_done = esp_finish_select(esp);
} else if (esp->ireg & ESP_INTR_RSEL) {
if (esp->active_cmd)
(void) esp_finish_select(esp);
intr_done = esp_reconnect(esp);
}
}
while (!intr_done)
intr_done = esp_process_event(esp);
}
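/* Exported IRQ handler.  While ESP_FLAG_QUICKIRQ_CHECK is set we poll for
 * a follow-on interrupt up to ESP_QUICKIRQ_LIMIT times before returning,
 * so that back-to-back events are serviced without leaving the handler.
 */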
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
struct esp *esp = dev_id;
unsigned long flags;
irqreturn_t ret;
spin_lock_irqsave(esp->host->host_lock, flags);
ret = IRQ_NONE;
if (esp->ops->irq_pending(esp)) {
ret = IRQ_HANDLED;
for (;;) {
int i;
__esp_interrupt(esp);
if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
break;
esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
if (esp->ops->irq_pending(esp))
break;
}
if (i == ESP_QUICKIRQ_LIMIT)
break;
}
}
spin_unlock_irqrestore(esp->host->host_lock, flags);
return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
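/* Probe the chip revision by checking which config registers are
 * implemented: if config2 does not read back we have a plain ESP100; if
 * config2 works but config3 does not, an ESP100A; otherwise one of the
 * FAS-style parts, split on the clock conversion factor below.
 */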
static void __devinit esp_get_revision(struct esp *esp)
{
u8 val;
esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
esp_write8(esp->config2, ESP_CFG2);
val = esp_read8(ESP_CFG2);
val &= ~ESP_CONFIG2_MAGIC;
if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
/* If what we write to cfg2 does not come back, cfg2 is not
* implemented, therefore this must be a plain esp100.
*/
esp->rev = ESP100;
} else {
esp->config2 = 0;
esp_set_all_config3(esp, 5);
esp->prev_cfg3 = 5;
esp_write8(esp->config2, ESP_CFG2);
esp_write8(0, ESP_CFG3);
esp_write8(esp->prev_cfg3, ESP_CFG3);
val = esp_read8(ESP_CFG3);
if (val != 5) {
/* The cfg2 register is implemented but cfg3 is not,
* so this must be an esp100a.
*/
esp->rev = ESP100A;
} else {
esp_set_all_config3(esp, 0);
esp->prev_cfg3 = 0;
esp_write8(esp->prev_cfg3, ESP_CFG3);
/* All of cfg{1,2,3} implemented, must be one of
* the fas variants, figure out which one.
*/
if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
esp->rev = FAST;
esp->sync_defp = SYNC_DEFP_FAST;
} else {
esp->rev = ESP236;
}
esp->config2 = 0;
esp_write8(esp->config2, ESP_CFG2);
}
}
}
static void __devinit esp_init_swstate(struct esp *esp)
{
int i;
INIT_LIST_HEAD(&esp->queued_cmds);
INIT_LIST_HEAD(&esp->active_cmds);
INIT_LIST_HEAD(&esp->esp_cmd_pool);
/* Start with a clear state, domain validation (via ->slave_configure,
* spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
* commands.
*/
for (i = 0 ; i < ESP_MAX_TARGET; i++) {
esp->target[i].flags = 0;
esp->target[i].nego_goal_period = 0;
esp->target[i].nego_goal_offset = 0;
esp->target[i].nego_goal_width = 0;
esp->target[i].nego_goal_tags = 0;
}
}
/* This places the ESP into a known state at boot time. */
static void __devinit esp_bootup_reset(struct esp *esp)
{
u8 val;
/* Reset the DMA */
esp->ops->reset_dma(esp);
/* Reset the ESP */
esp_reset_esp(esp);
/* Reset the SCSI bus, but tell ESP not to generate an irq */
val = esp_read8(ESP_CFG1);
val |= ESP_CONFIG1_SRRDISAB;
esp_write8(val, ESP_CFG1);
scsi_esp_cmd(esp, ESP_CMD_RS);
udelay(400);
esp_write8(esp->config1, ESP_CFG1);
/* Eat any bitrot in the chip and we are done... */
esp_read8(ESP_INTRPT);
}
static void __devinit esp_set_clock_params(struct esp *esp)
{
int fmhz;
u8 ccf;
/* This is getting messy but it has to be done correctly or else
* you get weird behavior all over the place. We are trying to
* basically figure out three pieces of information.
*
* a) Clock Conversion Factor
*
* This is a representation of the input crystal clock frequency
* going into the ESP on this machine. Any operation whose timing
* is longer than 400ns depends on this value being correct. For
* example, you'll get blips for arbitration/selection during high
* load or with multiple targets if this is not set correctly.
*
* b) Selection Time-Out
*
* The ESP isn't very bright and will arbitrate for the bus and try
* to select a target forever if you let it. This value tells the
* ESP when it has taken too long to negotiate and that it should
* interrupt the CPU so we can see what happened. The value is
* computed as follows (from NCR/Symbios chip docs).
*
* (Time Out Period) * (Input Clock)
* STO = ----------------------------------
* (8192) * (Clock Conversion Factor)
*
* We use a time out period of 250ms (ESP_BUS_TIMEOUT).
*
* c) Empirical constants for synchronous offset and transfer period
* register values
*
* This entails the smallest and largest sync period we could ever
* handle on this ESP.
*/
fmhz = esp->cfreq;
ccf = ((fmhz / 1000000) + 4) / 5;
if (ccf == 1)
ccf = 2;
/* If we can't find anything reasonable, just assume 20MHZ.
* This is the clock frequency of the older sun4c's where I've
* been unable to find the clock-frequency PROM property. All
* other machines provide useful values it seems.
*/
if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
fmhz = 20000000;
ccf = 4;
}
esp->cfact = (ccf == 8 ? 0 : ccf);
esp->cfreq = fmhz;
esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
esp->ctick = ESP_TICK(ccf, esp->ccycle);
esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
esp->sync_defp = SYNC_DEFP_SLOW;
}
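/* A rough worked example of the above, assuming the 20 MHz fallback:
 * ccf = (20 + 4) / 5 = 4, so cfact = 4; ccycle = 1000000000 / 20000 =
 * 50000; and neg_defp = (250 * 20000) / (8192 * 4) = 152, the selection
 * timeout value per the STO formula in the comment above.
 */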
static const char *esp_chip_names[] = {
"ESP100",
"ESP100A",
"ESP236",
"FAS236",
"FAS100A",
"FAST",
"FASHME",
};
static struct scsi_transport_template *esp_transport_template;
int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
{
static int instance;
int err;
esp->host->transportt = esp_transport_template;
esp->host->max_lun = ESP_MAX_LUN;
esp->host->cmd_per_lun = 2;
esp_set_clock_params(esp);
esp_get_revision(esp);
esp_init_swstate(esp);
esp_bootup_reset(esp);
printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
esp->host->unique_id, esp->regs, esp->dma_regs,
esp->host->irq);
printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
esp->host->unique_id, esp_chip_names[esp->rev],
esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
/* Let the SCSI bus reset settle. */
ssleep(esp_bus_reset_settle);
err = scsi_add_host(esp->host, dev);
if (err)
return err;
esp->host->unique_id = instance++;
scsi_scan_host(esp->host);
return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
void __devexit scsi_esp_unregister(struct esp *esp)
{
scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
static int esp_slave_alloc(struct scsi_device *dev)
{
struct esp *esp = host_to_esp(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
struct esp_lun_data *lp;
lp = kzalloc(sizeof(*lp), GFP_KERNEL);
if (!lp)
return -ENOMEM;
dev->hostdata = lp;
tp->starget = dev->sdev_target;
spi_min_period(tp->starget) = esp->min_period;
spi_max_offset(tp->starget) = 15;
if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
spi_max_width(tp->starget) = 1;
else
spi_max_width(tp->starget) = 0;
return 0;
}
static int esp_slave_configure(struct scsi_device *dev)
{
struct esp *esp = host_to_esp(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
goal_tags = 0;
if (dev->tagged_supported) {
/* XXX make this configurable somehow XXX */
goal_tags = ESP_DEFAULT_TAGS;
if (goal_tags > ESP_MAX_TAG)
goal_tags = ESP_MAX_TAG;
}
queue_depth = goal_tags;
if (queue_depth < dev->host->cmd_per_lun)
queue_depth = dev->host->cmd_per_lun;
if (goal_tags) {
scsi_set_tag_type(dev, MSG_ORDERED_TAG);
scsi_activate_tcq(dev, queue_depth);
} else {
scsi_deactivate_tcq(dev, queue_depth);
}
tp->flags |= ESP_TGT_DISCONNECT;
if (!spi_initial_dv(dev->sdev_target))
spi_dv_device(dev);
return 0;
}
static void esp_slave_destroy(struct scsi_device *dev)
{
struct esp_lun_data *lp = dev->hostdata;
kfree(lp);
dev->hostdata = NULL;
}
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct esp *esp = host_to_esp(cmd->device->host);
struct esp_cmd_entry *ent, *tmp;
struct completion eh_done;
unsigned long flags;
/* XXX This helps a lot with debugging but might be a bit
* XXX much for the final driver.
*/
spin_lock_irqsave(esp->host->host_lock, flags);
printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
esp->host->unique_id, cmd, cmd->cmnd[0]);
ent = esp->active_cmd;
if (ent)
printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
list_for_each_entry(ent, &esp->queued_cmds, list) {
printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
}
list_for_each_entry(ent, &esp->active_cmds, list) {
printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
}
esp_dump_cmd_log(esp);
spin_unlock_irqrestore(esp->host->host_lock, flags);
spin_lock_irqsave(esp->host->host_lock, flags);
ent = NULL;
list_for_each_entry(tmp, &esp->queued_cmds, list) {
if (tmp->cmd == cmd) {
ent = tmp;
break;
}
}
if (ent) {
/* Easiest case, we didn't even issue the command
* yet so it is trivial to abort.
*/
list_del(&ent->list);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd);
esp_put_ent(esp, ent);
goto out_success;
}
init_completion(&eh_done);
ent = esp->active_cmd;
if (ent && ent->cmd == cmd) {
/* Command is the currently active command on
* the bus. If we already have an output message
* pending, no dice.
*/
if (esp->msg_out_len)
goto out_failure;
/* Send out an abort, encouraging the target to
* go to MSGOUT phase by asserting ATN.
*/
esp->msg_out[0] = ABORT_TASK_SET;
esp->msg_out_len = 1;
ent->eh_done = &eh_done;
scsi_esp_cmd(esp, ESP_CMD_SATN);
} else {
/* The command is disconnected. This is not easy to
* abort. For now we fail and let the scsi error
* handling layer go try a scsi bus reset or host
* reset.
*
* What we could do is put together a scsi command
* solely for the purpose of sending an abort message
* to the target. Coming up with all the code to
* cook up scsi commands, special case them everywhere,
* etc. is for questionable gain and it would be better
* if the generic scsi error handling layer could do at
* least some of that for us.
*
* Anyways this is an area for potential future improvement
* in this driver.
*/
goto out_failure;
}
spin_unlock_irqrestore(esp->host->host_lock, flags);
if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
spin_lock_irqsave(esp->host->host_lock, flags);
ent->eh_done = NULL;
spin_unlock_irqrestore(esp->host->host_lock, flags);
return FAILED;
}
return SUCCESS;
out_success:
spin_unlock_irqrestore(esp->host->host_lock, flags);
return SUCCESS;
out_failure:
/* XXX This might be a good location to set ESP_TGT_BROKEN
* XXX since we know which target/lun in particular is
* XXX causing trouble.
*/
spin_unlock_irqrestore(esp->host->host_lock, flags);
return FAILED;
}
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct esp *esp = host_to_esp(cmd->device->host);
struct completion eh_reset;
unsigned long flags;
init_completion(&eh_reset);
spin_lock_irqsave(esp->host->host_lock, flags);
esp->eh_reset = &eh_reset;
/* XXX This is too simple... We should add lots of
* XXX checks here so that if we find that the chip is
* XXX very wedged we return failure immediately so
* XXX that we can perform a full chip reset.
*/
esp->flags |= ESP_FLAG_RESETTING;
scsi_esp_cmd(esp, ESP_CMD_RS);
spin_unlock_irqrestore(esp->host->host_lock, flags);
ssleep(esp_bus_reset_settle);
if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
spin_lock_irqsave(esp->host->host_lock, flags);
esp->eh_reset = NULL;
spin_unlock_irqrestore(esp->host->host_lock, flags);
return FAILED;
}
return SUCCESS;
}
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
struct esp *esp = host_to_esp(cmd->device->host);
unsigned long flags;
spin_lock_irqsave(esp->host->host_lock, flags);
esp_bootup_reset(esp);
esp_reset_cleanup(esp);
spin_unlock_irqrestore(esp->host->host_lock, flags);
ssleep(esp_bus_reset_settle);
return SUCCESS;
}
static const char *esp_info(struct Scsi_Host *host)
{
return "esp";
}
struct scsi_host_template scsi_esp_template = {
.module = THIS_MODULE,
.name = "esp",
.info = esp_info,
.queuecommand = esp_queuecommand,
.slave_alloc = esp_slave_alloc,
.slave_configure = esp_slave_configure,
.slave_destroy = esp_slave_destroy,
.eh_abort_handler = esp_eh_abort_handler,
.eh_bus_reset_handler = esp_eh_bus_reset_handler,
.eh_host_reset_handler = esp_eh_host_reset_handler,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff,
.skip_settle_delay = 1,
};
EXPORT_SYMBOL(scsi_esp_template);
static void esp_get_signalling(struct Scsi_Host *host)
{
struct esp *esp = host_to_esp(host);
enum spi_signal_type type;
if (esp->flags & ESP_FLAG_DIFFERENTIAL)
type = SPI_SIGNAL_HVD;
else
type = SPI_SIGNAL_SE;
spi_signalling(host) = type;
}
static void esp_set_offset(struct scsi_target *target, int offset)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
struct esp *esp = host_to_esp(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_offset = offset;
tp->flags |= ESP_TGT_CHECK_NEGO;
}
static void esp_set_period(struct scsi_target *target, int period)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
struct esp *esp = host_to_esp(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_period = period;
tp->flags |= ESP_TGT_CHECK_NEGO;
}
static void esp_set_width(struct scsi_target *target, int width)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
struct esp *esp = host_to_esp(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_width = (width ? 1 : 0);
tp->flags |= ESP_TGT_CHECK_NEGO;
}
static struct spi_function_template esp_transport_ops = {
.set_offset = esp_set_offset,
.show_offset = 1,
.set_period = esp_set_period,
.show_period = 1,
.set_width = esp_set_width,
.show_width = 1,
.get_signalling = esp_get_signalling,
};
static int __init esp_init(void)
{
BUILD_BUG_ON(sizeof(struct scsi_pointer) <
sizeof(struct esp_cmd_priv));
esp_transport_template = spi_attach_transport(&esp_transport_ops);
if (!esp_transport_template)
return -ENODEV;
return 0;
}
static void __exit esp_exit(void)
{
spi_release_transport(esp_transport_template);
}
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
"ESP scsi bus reset delay in seconds");
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
" 0x00000001 Log interrupt events\n"
" 0x00000002 Log scsi commands\n"
" 0x00000004 Log resets\n"
" 0x00000008 Log message in events\n"
" 0x00000010 Log message out events\n"
" 0x00000020 Log command completion\n"
" 0x00000040 Log disconnects\n"
" 0x00000080 Log data start\n"
" 0x00000100 Log data done\n"
" 0x00000200 Log reconnects\n"
" 0x00000400 Log auto-sense data\n"
);
module_init(esp_init);
module_exit(esp_exit);
/* esp_scsi.h: Defines and structures for the ESP driver.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#ifndef _ESP_SCSI_H
#define _ESP_SCSI_H
/* Access Description Offset */
#define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */
#define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */
#define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */
#define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */
#define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */
#define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */
#define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */
#define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */
#define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */
#define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */
#define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */
#define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */
#define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */
#define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */
#define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */
#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */
#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */
#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */
#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */
#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
#define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */
#define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */
#define SBUS_ESP_REG_SIZE 0x40UL
/* Bitfield meanings for the above registers. */
/* ESP config reg 1, read-write, found on all ESP chips */
#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */
#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */
#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */
#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */
#define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */
#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */
#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */
#define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */
#define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */
#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
/* ESP config register 3 read-write, found only on esp236+fas236+fas100a+hme chips */
#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */
#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */
#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */
#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */
#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */
#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */
#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */
#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */
#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */
#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */
#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */
#define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */
#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */
#define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */
#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
/* ESP command register read-write */
/* Group 1 commands: These may be sent at any point in time to the ESP
* chip. None of them can generate interrupts 'cept
* the "SCSI bus reset" command if you have not disabled
* SCSI reset interrupts in the config1 ESP register.
*/
#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
#define ESP_CMD_RC 0x02 /* Chip reset */
#define ESP_CMD_RS 0x03 /* SCSI bus reset */
/* Group 2 commands: ESP must be an initiator and connected to a target
* for these commands to work.
*/
#define ESP_CMD_TI 0x10 /* Transfer Information */
#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
#define ESP_CMD_SATN 0x1a /* Set ATN */
#define ESP_CMD_RATN 0x1b /* De-assert ATN */
/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
* to a target as the initiator for these commands to work.
*/
#define ESP_CMD_SMSG 0x20 /* Send message */
#define ESP_CMD_SSTAT 0x21 /* Send status */
#define ESP_CMD_SDATA 0x22 /* Send data */
#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
#define ESP_CMD_DCNCT 0x27 /* Disconnect */
#define ESP_CMD_RMSG 0x28 /* Receive Message */
#define ESP_CMD_RCMD 0x29 /* Receive Command */
#define ESP_CMD_RDATA 0x2a /* Receive Data */
#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
/* Group 4 commands: The ESP must be in the disconnected state and must
* not be connected to any targets as initiator for
* these commands to work.
*/
#define ESP_CMD_RSEL 0x40 /* Reselect */
#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
#define ESP_CMD_SELA 0x42 /* Select w/ATN */
#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
#define ESP_CMD_ESEL 0x44 /* Enable selection */
#define ESP_CMD_DSEL 0x45 /* Disable selections */
#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
/* This bit enables the ESP's DMA on the SBus */
#define ESP_CMD_DMA 0x80 /* Do DMA? */
/* ESP status register read-only */
#define ESP_STAT_PIO 0x01 /* IO phase bit */
#define ESP_STAT_PCD 0x02 /* CD phase bit */
#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
#define ESP_STAT_PERR 0x20 /* Parity error */
#define ESP_STAT_SPAM 0x40 /* Real bad error */
/* This indicates the 'interrupt pending' condition on esp236; it is a reserved
* bit on other revs of the ESP.
*/
#define ESP_STAT_INTR 0x80 /* Interrupt */
/* The status register can be masked with ESP_STAT_PMASK and compared
* with the following values to determine the current phase the ESP
* (at least thinks it) is in. For our purposes we also add our own
* software 'done' bit for our phase management engine.
*/
#define ESP_DOP (0) /* Data Out */
#define ESP_DIP (ESP_STAT_PIO) /* Data In */
#define ESP_CMDP (ESP_STAT_PCD) /* Command */
#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
/* HME only: status 2 register */
#define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */
#define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */
#define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */
#define ESP_STAT2_CREGA 0x08 /* The command reg is active now */
#define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */
#define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */
#define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */
#define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */
/* ESP interrupt register read-only */
#define ESP_INTR_S 0x01 /* Select w/o ATN */
#define ESP_INTR_SATN 0x02 /* Select w/ATN */
#define ESP_INTR_RSEL 0x04 /* Reselected */
#define ESP_INTR_FDONE 0x08 /* Function done */
#define ESP_INTR_BSERV 0x10 /* Bus service */
#define ESP_INTR_DC 0x20 /* Disconnect */
#define ESP_INTR_IC 0x40 /* Illegal command given */
#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
/* ESP sequence step register read-only */
#define ESP_STEP_VBITS 0x07 /* Valid bits */
#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
#define ESP_STEP_SID 0x01 /* One msg byte sent */
#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
* bytes to be lost
*/
#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
/* Ho hum, some ESPs set the step register to this as well... */
#define ESP_STEP_FINI5 0x05
#define ESP_STEP_FINI6 0x06
#define ESP_STEP_FINI7 0x07
/* ESP chip-test register read-write */
#define ESP_TEST_TARG 0x01 /* Target test mode */
#define ESP_TEST_INI 0x02 /* Initiator test mode */
#define ESP_TEST_TS 0x04 /* Tristate test mode */
/* ESP unique ID register read-only, found on fas236+fas100a only */
#define ESP_UID_F100A 0x00 /* ESP FAS100A */
#define ESP_UID_F236 0x02 /* ESP FAS236 */
#define ESP_UID_REV 0x07 /* ESP revision */
#define ESP_UID_FAM 0xf8 /* ESP family */
/* ESP fifo flags register read-only */
/* Note that the following implies a 16 byte FIFO on the ESP. */
#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */
#define ESP_FF_SSTEP 0xe0 /* Sequence step */
/* ESP clock conversion factor register write-only */
#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
#define ESP_CCF_F2 0x02 /* 10MHz */
#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
/* HME only... */
#define ESP_BUSID_RESELID 0x10
#define ESP_BUSID_CTR32BIT 0x40
#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
* input clock rates we try to do 10mb/s although I don't think a transfer can
* even run that fast with an ESP even with DMA2 scatter gather pipelining.
*/
#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
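/* Note: halving the default period register value (0x32 -> 0x19) matches
 * the doubling of the target rate (5 MB/s -> 10 MB/s) described above.
 */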
struct esp_cmd_priv {
union {
dma_addr_t dma_addr;
int num_sg;
} u;
unsigned int cur_residue;
struct scatterlist *cur_sg;
unsigned int tot_residue;
};
#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
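/* The per-command private data is overlaid on the scsi_cmnd scsi_pointer
 * (SCp) area; esp_init() has a BUILD_BUG_ON() ensuring esp_cmd_priv fits.
 */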
enum esp_rev {
ESP100 = 0x00, /* NCR53C90 - very broken */
ESP100A = 0x01, /* NCR53C90A */
ESP236 = 0x02,
FAS236 = 0x03,
FAS100A = 0x04,
FAST = 0x05,
FASHME = 0x06,
};
struct esp_cmd_entry {
struct list_head list;
struct scsi_cmnd *cmd;
unsigned int saved_cur_residue;
struct scatterlist *saved_cur_sg;
unsigned int saved_tot_residue;
u8 flags;
#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */
#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */
#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
u8 tag[2];
u8 status;
u8 message;
unsigned char *sense_ptr;
unsigned char *saved_sense_ptr;
dma_addr_t sense_dma;
struct completion *eh_done;
};
/* XXX make this configurable somehow XXX */
#define ESP_DEFAULT_TAGS 16
#define ESP_MAX_TARGET 16
#define ESP_MAX_LUN 8
#define ESP_MAX_TAG 256
struct esp_lun_data {
struct esp_cmd_entry *non_tagged_cmd;
int num_tagged;
int hold;
struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG];
};
struct esp_target_data {
/* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which
* match the currently negotiated settings for this target. The SCSI
* protocol values are maintained in spi_{offset,period,wide}(starget).
*/
u8 esp_period;
u8 esp_offset;
u8 esp_config3;
u8 flags;
#define ESP_TGT_WIDE 0x01
#define ESP_TGT_DISCONNECT 0x02
#define ESP_TGT_NEGO_WIDE 0x04
#define ESP_TGT_NEGO_SYNC 0x08
#define ESP_TGT_CHECK_NEGO 0x40
#define ESP_TGT_BROKEN 0x80
/* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this
* device we will try to negotiate the following parameters.
*/
u8 nego_goal_period;
u8 nego_goal_offset;
u8 nego_goal_width;
u8 nego_goal_tags;
struct scsi_target *starget;
};
struct esp_event_ent {
u8 type;
#define ESP_EVENT_TYPE_EVENT 0x01
#define ESP_EVENT_TYPE_CMD 0x02
u8 val;
u8 sreg;
u8 seqreg;
u8 sreg2;
u8 ireg;
u8 select_state;
u8 event;
u8 __pad;
};
struct esp;
struct esp_driver_ops {
/* Read and write the ESP 8-bit registers. On some
* applications of the ESP chip the registers are at 4-byte
* instead of 1-byte intervals.
*/
void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
u8 (*esp_read8)(struct esp *esp, unsigned long reg);
/* Map and unmap DMA memory. Eventually the driver will be
* converted to the generic DMA API as soon as SBUS is able to
* cope with that. At such time we can remove this.
*/
dma_addr_t (*map_single)(struct esp *esp, void *buf,
size_t sz, int dir);
int (*map_sg)(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir);
void (*unmap_single)(struct esp *esp, dma_addr_t addr,
size_t sz, int dir);
void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir);
/* Return non-zero if there is an IRQ pending. Usually this
* status bit lives in the DMA controller sitting in front of
* the ESP. This has to be accurate or else the ESP interrupt
* handler will not run.
*/
int (*irq_pending)(struct esp *esp);
/* Reset the DMA engine entirely. On return, ESP interrupts
* should be enabled. Often the interrupt enabling is
* controlled in the DMA engine.
*/
void (*reset_dma)(struct esp *esp);
/* Drain any pending DMA in the DMA engine after a transfer.
* This is for writes to memory.
*/
void (*dma_drain)(struct esp *esp);
/* Invalidate the DMA engine after a DMA transfer. */
void (*dma_invalidate)(struct esp *esp);
/* Setup an ESP command that will use a DMA transfer.
* The 'esp_count' specifies what transfer length should be
* programmed into the ESP transfer counter registers, whereas
* the 'dma_count' is the length that should be programmed into
* the DMA controller. Usually they are the same. If 'write'
* is non-zero, this transfer is a write into memory. 'cmd'
* holds the ESP command that should be issued by calling
* scsi_esp_cmd() at the appropriate time while programming
* the DMA hardware.
*/
void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count,
u32 dma_count, int write, u8 cmd);
/* Return non-zero if the DMA engine is reporting an error
* currently.
*/
int (*dma_error)(struct esp *esp);
};
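/* sun_esp.c below implements these hooks as sbus_esp_ops; its dvmaesc1
 * case in sbus_esp_send_dma_cmd() rounds the value programmed into the
 * DMA counter up to a page boundary while the ESP transfer counter keeps
 * the exact esp_count.
 */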
#define ESP_MAX_MSG_SZ 8
#define ESP_EVENT_LOG_SZ 32
#define ESP_QUICKIRQ_LIMIT 100
#define ESP_RESELECT_TAG_LIMIT 2500
struct esp {
void __iomem *regs;
void __iomem *dma_regs;
const struct esp_driver_ops *ops;
struct Scsi_Host *host;
void *dev;
struct esp_cmd_entry *active_cmd;
struct list_head queued_cmds;
struct list_head active_cmds;
u8 *command_block;
dma_addr_t command_block_dma;
unsigned int data_dma_len;
/* The following are used to determine the cause of an IRQ. Upon every
* IRQ entry we synchronize these with the hardware registers.
*/
u8 sreg;
u8 seqreg;
u8 sreg2;
u8 ireg;
u32 prev_hme_dmacsr;
u8 prev_soff;
u8 prev_stp;
u8 prev_cfg3;
u8 __pad;
struct list_head esp_cmd_pool;
struct esp_target_data target[ESP_MAX_TARGET];
int fifo_cnt;
u8 fifo[16];
struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ];
int esp_event_cur;
u8 msg_out[ESP_MAX_MSG_SZ];
int msg_out_len;
u8 msg_in[ESP_MAX_MSG_SZ];
int msg_in_len;
u8 bursts;
u8 config1;
u8 config2;
u8 scsi_id;
u32 scsi_id_mask;
enum esp_rev rev;
u32 flags;
#define ESP_FLAG_DIFFERENTIAL 0x00000001
#define ESP_FLAG_RESETTING 0x00000002
#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */
#define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */
#define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */
/* When we are not selecting, we are expecting an event. */
u8 event;
#define ESP_EVENT_NONE 0x00
#define ESP_EVENT_CMD_START 0x01
#define ESP_EVENT_CMD_DONE 0x02
#define ESP_EVENT_DATA_IN 0x03
#define ESP_EVENT_DATA_OUT 0x04
#define ESP_EVENT_DATA_DONE 0x05
#define ESP_EVENT_MSGIN 0x06
#define ESP_EVENT_MSGIN_MORE 0x07
#define ESP_EVENT_MSGIN_DONE 0x08
#define ESP_EVENT_MSGOUT 0x09
#define ESP_EVENT_MSGOUT_DONE 0x0a
#define ESP_EVENT_STATUS 0x0b
#define ESP_EVENT_FREE_BUS 0x0c
#define ESP_EVENT_CHECK_PHASE 0x0d
#define ESP_EVENT_RESET 0x10
/* Probed in esp_get_clock_params() */
u32 cfact;
u32 cfreq;
u32 ccycle;
u32 ctick;
u32 neg_defp;
u32 sync_defp;
/* Computed in esp_reset_esp() */
u32 max_period;
u32 min_period;
u32 radelay;
/* Slow command state. */
u8 *cmd_bytes_ptr;
int cmd_bytes_left;
struct completion *eh_reset;
struct sbus_dma *dma;
};
#define host_to_esp(host) ((struct esp *)(host)->hostdata)
/* A front-end driver for the ESP chip should do the following in
* its device probe routine:
* 1) Allocate the host and private area using scsi_host_alloc()
* with size 'sizeof(struct esp)'. The first argument to
* scsi_host_alloc() should be &scsi_esp_template.
* 2) Set host->max_id as appropriate.
* 3) Set esp->host to the scsi_host itself, and esp->dev
* to the device object pointer.
* 4) Hook up esp->ops to the front-end implementation.
* 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE
* in esp->flags.
* 6) Map the DMA and ESP chip registers.
* 7) DMA map the ESP command block, store the DMA address
* in esp->command_block_dma.
* 8) Register the scsi_esp_intr() interrupt handler.
* 9) Probe for and provide the following chip properties:
* esp->scsi_id (assign to esp->host->this_id too)
* esp->scsi_id_mask
* If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL
* esp->cfreq
* DMA burst bit mask in esp->bursts, if necessary
* 10) Perform any actions necessary before the ESP device can
* be programmed for the first time. On some configs, for
* example, the DMA engine has to be reset before ESP can
* be programmed.
* 11) If necessary, call dev_set_drvdata() as needed.
* 12) Call scsi_esp_register() with prepared 'esp' structure
* and a device pointer if possible.
* 13) Check scsi_esp_register() return value, release all resources
* if an error was returned.
*/
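/* esp_sbus_probe_one() in sun_esp.c below is a front-end that follows
 * this checklist for SBUS systems.
 */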
extern struct scsi_host_template scsi_esp_template;
extern int scsi_esp_register(struct esp *, struct device *);
extern void scsi_esp_unregister(struct esp *);
extern irqreturn_t scsi_esp_intr(int, void *);
extern void scsi_esp_cmd(struct esp *, u8);
#endif /* !(_ESP_SCSI_H) */
/* sun_esp.c: ESP front-end for Sparc SBUS systems.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/sbus.h>
#include <scsi/scsi_host.h>
#include "esp_scsi.h"
#define DRV_MODULE_NAME "sun_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "April 19, 2007"
#define dma_read32(REG) \
sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
sbus_writel((VAL), esp->dma_regs + (REG))
static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
{
struct sbus_dev *sdev = esp->dev;
struct sbus_dma *dma;
if (dma_sdev != NULL) {
for_each_dvma(dma) {
if (dma->sdev == dma_sdev)
break;
}
} else {
for_each_dvma(dma) {
if (dma->sdev == NULL)
break;
/* If bus + slot are the same and it has the
* correct OBP name, it's ours.
*/
if (sdev->bus == dma->sdev->bus &&
sdev->slot == dma->sdev->slot &&
(!strcmp(dma->sdev->prom_name, "dma") ||
!strcmp(dma->sdev->prom_name, "espdma")))
break;
}
}
if (dma == NULL) {
printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
sdev->ofdev.node->full_name);
return -ENODEV;
}
esp->dma = dma;
esp->dma_regs = dma->regs;
return 0;
}
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
struct sbus_dev *sdev = esp->dev;
struct resource *res;
/* On HME two register sets exist: the first is the DVMA,
* the second is the ESP registers.
*/
if (hme)
res = &sdev->resource[1];
else
res = &sdev->resource[0];
esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
if (!esp->regs)
return -ENOMEM;
return 0;
}
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
struct sbus_dev *sdev = esp->dev;
esp->command_block = sbus_alloc_consistent(sdev, 16,
&esp->command_block_dma);
if (!esp->command_block)
return -ENOMEM;
return 0;
}
static int __devinit esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
struct sbus_dev *sdev = esp->dev;
host->irq = sdev->irqs[0];
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}
static void __devinit esp_get_scsi_id(struct esp *esp)
{
struct sbus_dev *sdev = esp->dev;
struct device_node *dp = sdev->ofdev.node;
esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
if (esp->scsi_id != 0xff)
goto done;
esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
if (esp->scsi_id != 0xff)
goto done;
if (!sdev->bus) {
/* SUN4 */
esp->scsi_id = 7;
goto done;
}
esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
"scsi-initiator-id", 7);
done:
esp->host->this_id = esp->scsi_id;
esp->scsi_id_mask = (1 << esp->scsi_id);
}
static void __devinit esp_get_differential(struct esp *esp)
{
struct sbus_dev *sdev = esp->dev;
struct device_node *dp = sdev->ofdev.node;
if (of_find_property(dp, "differential", NULL))
esp->flags |= ESP_FLAG_DIFFERENTIAL;
else
esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}
static void __devinit esp_get_clock_params(struct esp *esp)
{
struct sbus_dev *sdev = esp->dev;
struct device_node *dp = sdev->ofdev.node;
struct device_node *bus_dp;
int fmhz;
bus_dp = NULL;
if (sdev != NULL && sdev->bus != NULL)
bus_dp = sdev->bus->ofdev.node;
fmhz = of_getintprop_default(dp, "clock-frequency", 0);
if (fmhz == 0)
fmhz = (!bus_dp) ? 0 :
of_getintprop_default(bus_dp, "clock-frequency", 0);
esp->cfreq = fmhz;
}
static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
{
struct sbus_dev *sdev = esp->dev;
struct device_node *dp = sdev->ofdev.node;
u8 bursts;
bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
if (dma) {
struct device_node *dma_dp = dma->ofdev.node;
u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
if (val != 0xff)
bursts &= val;
}
if (sdev->bus) {
u8 val = of_getintprop_default(sdev->bus->ofdev.node,
"burst-sizes", 0xff);
if (val != 0xff)
bursts &= val;
}
if (bursts == 0xff ||
(bursts & DMA_BURST16) == 0 ||
(bursts & DMA_BURST32) == 0)
bursts = (DMA_BURST32 - 1);
esp->bursts = bursts;
}
static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
{
esp_get_scsi_id(esp);
esp_get_differential(esp);
esp_get_clock_params(esp);
esp_get_bursts(esp, espdma);
}
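/* On SBUS the ESP registers are long-word spaced, hence the reg * 4UL
 * scaling in the accessors below (see the esp_driver_ops comment in
 * esp_scsi.h).
 */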
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
sbus_writeb(val, esp->regs + (reg * 4UL));
}
static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
return sbus_readb(esp->regs + (reg * 4UL));
}
static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
size_t sz, int dir)
{
return sbus_map_single(esp->dev, buf, sz, dir);
}
static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
return sbus_map_sg(esp->dev, sg, num_sg, dir);
}
static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
size_t sz, int dir)
{
sbus_unmap_single(esp->dev, addr, sz, dir);
}
static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
sbus_unmap_sg(esp->dev, sg, num_sg, dir);
}
static int sbus_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
return 1;
return 0;
}
static void sbus_esp_reset_dma(struct esp *esp)
{
int can_do_burst16, can_do_burst32, can_do_burst64;
int can_do_sbus64, lim;
u32 val;
can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
can_do_burst64 = 0;
can_do_sbus64 = 0;
if (sbus_can_dma_64bit(esp->dev))
can_do_sbus64 = 1;
if (sbus_can_burst64(esp->dev))
can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
/* Put the DVMA into a known state. */
if (esp->dma->revision != dvmahme) {
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_RST_SCSI, DMA_CSR);
dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
}
switch (esp->dma->revision) {
case dvmahme:
dma_write32(DMA_RESET_FAS366, DMA_CSR);
dma_write32(DMA_RST_SCSI, DMA_CSR);
esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
DMA_SCSI_DISAB | DMA_INT_ENAB);
esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
DMA_BRST_SZ);
if (can_do_burst64)
esp->prev_hme_dmacsr |= DMA_BRST64;
else if (can_do_burst32)
esp->prev_hme_dmacsr |= DMA_BRST32;
if (can_do_sbus64) {
esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
sbus_set_sbus64(esp->dev, esp->bursts);
}
lim = 1000;
while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
"will not clear!\n",
esp->host->unique_id);
break;
}
udelay(1);
}
dma_write32(0, DMA_CSR);
dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
dma_write32(0, DMA_ADDR);
break;
case dvmarev2:
if (esp->rev != ESP100) {
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_3CLKS, DMA_CSR);
}
break;
case dvmarev3:
val = dma_read32(DMA_CSR);
val &= ~DMA_3CLKS;
val |= DMA_2CLKS;
if (can_do_burst32) {
val &= ~DMA_BRST_SZ;
val |= DMA_BRST32;
}
dma_write32(val, DMA_CSR);
break;
case dvmaesc1:
val = dma_read32(DMA_CSR);
val |= DMA_ADD_ENABLE;
val &= ~DMA_BCNT_ENAB;
if (!can_do_burst32 && can_do_burst16) {
val |= DMA_ESC_BURST;
} else {
val &= ~(DMA_ESC_BURST);
}
dma_write32(val, DMA_CSR);
break;
default:
break;
}
/* Enable interrupts. */
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
static void sbus_esp_dma_drain(struct esp *esp)
{
u32 csr;
int lim;
if (esp->dma->revision == dvmahme)
return;
csr = dma_read32(DMA_CSR);
if (!(csr & DMA_FIFO_ISDRAIN))
return;
if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
lim = 1000;
while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
esp->host->unique_id);
break;
}
udelay(1);
}
}
static void sbus_esp_dma_invalidate(struct esp *esp)
{
if (esp->dma->revision == dvmahme) {
dma_write32(DMA_RST_SCSI, DMA_CSR);
esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
(DMA_PARITY_OFF | DMA_2CLKS |
DMA_SCSI_DISAB | DMA_INT_ENAB)) &
~(DMA_ST_WRITE | DMA_ENABLE));
dma_write32(0, DMA_CSR);
dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
/* This is necessary to avoid having the SCSI channel
* engine lock up on us.
*/
dma_write32(0, DMA_ADDR);
} else {
u32 val;
int lim;
lim = 1000;
while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not "
"invalidate!\n", esp->host->unique_id);
break;
}
udelay(1);
}
val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
val |= DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
val &= ~DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
}
}
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
u32 csr;
BUG_ON(!(cmd & ESP_CMD_DMA));
sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
if (esp->rev == FASHME) {
sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
sbus_esp_write8(esp, 0, FAS_RHI);
scsi_esp_cmd(esp, cmd);
csr = esp->prev_hme_dmacsr;
csr |= DMA_SCSI_DISAB | DMA_ENABLE;
if (write)
csr |= DMA_ST_WRITE;
else
csr &= ~DMA_ST_WRITE;
esp->prev_hme_dmacsr = csr;
dma_write32(dma_count, DMA_COUNT);
dma_write32(addr, DMA_ADDR);
dma_write32(csr, DMA_CSR);
} else {
csr = dma_read32(DMA_CSR);
csr |= DMA_ENABLE;
if (write)
csr |= DMA_ST_WRITE;
else
csr &= ~DMA_ST_WRITE;
dma_write32(csr, DMA_CSR);
if (esp->dma->revision == dvmaesc1) {
u32 end = PAGE_ALIGN(addr + dma_count + 16U);
dma_write32(end - addr, DMA_COUNT);
}
dma_write32(addr, DMA_ADDR);
scsi_esp_cmd(esp, cmd);
}
}
static int sbus_esp_dma_error(struct esp *esp)
{
u32 csr = dma_read32(DMA_CSR);
if (csr & DMA_HNDL_ERROR)
return 1;
return 0;
}
static const struct esp_driver_ops sbus_esp_ops = {
.esp_write8 = sbus_esp_write8,
.esp_read8 = sbus_esp_read8,
.map_single = sbus_esp_map_single,
.map_sg = sbus_esp_map_sg,
.unmap_single = sbus_esp_unmap_single,
.unmap_sg = sbus_esp_unmap_sg,
.irq_pending = sbus_esp_irq_pending,
.reset_dma = sbus_esp_reset_dma,
.dma_drain = sbus_esp_dma_drain,
.dma_invalidate = sbus_esp_dma_invalidate,
.send_dma_cmd = sbus_esp_send_dma_cmd,
.dma_error = sbus_esp_dma_error,
};
static int __devinit esp_sbus_probe_one(struct device *dev,
struct sbus_dev *esp_dev,
struct sbus_dev *espdma,
struct sbus_bus *sbus,
int hme)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
int err;
host = scsi_host_alloc(tpnt, sizeof(struct esp));
err = -ENOMEM;
if (!host)
goto fail;
host->max_id = (hme ? 16 : 8);
esp = host_to_esp(host);
esp->host = host;
esp->dev = esp_dev;
esp->ops = &sbus_esp_ops;
if (hme)
esp->flags |= ESP_FLAG_WIDE_CAPABLE;
err = esp_sbus_find_dma(esp, espdma);
if (err < 0)
goto fail_unlink;
err = esp_sbus_map_regs(esp, hme);
if (err < 0)
goto fail_unlink;
err = esp_sbus_map_command_block(esp);
if (err < 0)
goto fail_unmap_regs;
err = esp_sbus_register_irq(esp);
if (err < 0)
goto fail_unmap_command_block;
esp_sbus_get_props(esp, espdma);
/* Before we try to touch the ESP chip, ESC1 dma can
* come up with the reset bit set, so make sure that
* is clear first.
*/
if (esp->dma->revision == dvmaesc1) {
u32 val = dma_read32(DMA_CSR);
dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
}
dev_set_drvdata(&esp_dev->ofdev.dev, esp);
err = scsi_esp_register(esp, dev);
if (err)
goto fail_free_irq;
return 0;
fail_free_irq:
free_irq(host->irq, esp);
fail_unmap_command_block:
sbus_free_consistent(esp->dev, 16,
esp->command_block,
esp->command_block_dma);
fail_unmap_regs:
sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
scsi_host_put(host);
fail:
return err;
}
static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
struct sbus_dev *sdev = to_sbus_device(&dev->dev);
struct device_node *dp = dev->node;
struct sbus_dev *dma_sdev = NULL;
int hme = 0;
if (dp->parent &&
(!strcmp(dp->parent->name, "espdma") ||
!strcmp(dp->parent->name, "dma")))
dma_sdev = sdev->parent;
else if (!strcmp(dp->name, "SUNW,fas")) {
dma_sdev = sdev;
hme = 1;
}
return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
sdev->bus, hme);
}
static int __devexit esp_sbus_remove(struct of_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
u32 val;
scsi_esp_unregister(esp);
/* Disable interrupts. */
val = dma_read32(DMA_CSR);
dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
free_irq(irq, esp);
sbus_free_consistent(esp->dev, 16,
esp->command_block,
esp->command_block_dma);
sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
scsi_host_put(esp->host);
return 0;
}
static struct of_device_id esp_match[] = {
{
.name = "SUNW,esp",
},
{
.name = "SUNW,fas",
},
{
.name = "esp",
},
{},
};
MODULE_DEVICE_TABLE(of, esp_match);
static struct of_platform_driver esp_sbus_driver = {
.name = "esp",
.match_table = esp_match,
.probe = esp_sbus_probe,
.remove = __devexit_p(esp_sbus_remove),
};
static int __init sunesp_init(void)
{
return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
}
static void __exit sunesp_exit(void)
{
of_unregister_driver(&esp_sbus_driver);
}
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(sunesp_init);
module_exit(sunesp_exit);