Commit dc84a4e4 authored by Jeff Garzik

Merge bk://kernel.bkbits.net/jgarzik/libata-2.5

into redhat.com:/spare/repo/libata-2.5-merge
parents 564768ae 8402aa96
@@ -12,7 +12,7 @@ DOCBOOKS := wanbook.sgml z8530book.sgml mcabook.sgml videobook.sgml \
deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \
writing_usb_driver.sgml scsidrivers.sgml sis900.sgml \
kernel-api.sgml journal-api.sgml lsm.sgml usb.sgml \
gadget.sgml libata.sgml
###
# The build process is as follows (targets):
...
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V3.1//EN"[]>
<book id="libataDevGuide">
<bookinfo>
<title>libATA Developer's Guide</title>
<authorgroup>
<author>
<firstname>Jeff</firstname>
<surname>Garzik</surname>
</author>
</authorgroup>
<copyright>
<year>2003</year>
<holder>Jeff Garzik</holder>
</copyright>
<legalnotice>
<para>
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
<ulink url="http://www.opensource.org/licenses/osl-1.1.txt">http://www.opensource.org/licenses/osl-1.1.txt</ulink> and is included herein
by reference.
</para>
<para>
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
</para>
</legalnotice>
</bookinfo>
<toc></toc>
<chapter id="libataThanks">
<title>Thanks</title>
<para>
The bulk of the ATA knowledge comes thanks to long conversations with
Andre Hedrick (www.linux-ide.org).
</para>
<para>
Thanks to Alan Cox for pointing out similarities
between SATA and SCSI, and in general for motivation to hack on
libata.
</para>
<para>
libata's device detection
method, ata_pio_devchk, and in general all the early probing was
based on extensive study of Hale Landis's probe/reset code in his
ATADRVR driver (www.ata-atapi.com).
</para>
</chapter>
<chapter id="libataExt">
<title>libata Library</title>
!Edrivers/scsi/libata-core.c
!Edrivers/scsi/libata-scsi.c
</chapter>
<chapter id="libataInt">
<title>libata Internals</title>
!Idrivers/scsi/libata-core.c
!Idrivers/scsi/libata-scsi.c
</chapter>
<chapter id="PiixInt">
<title>ata_piix Internals</title>
!Idrivers/scsi/ata_piix.c
</chapter>
<chapter id="SILInt">
<title>ata_sil Internals</title>
!Idrivers/scsi/sata_sil.c
</chapter>
<chapter id="VIAInt">
<title>ata_via Internals</title>
!Idrivers/scsi/sata_via.c
</chapter>
</book>
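The !E and !I directives above pull kernel-doc comments out of the named source files when the DocBook is built: !E documents the exported interfaces, !I the internal ones. As orientation for contributors, here is a minimal sketch of the comment format those directives consume; the helper name and parameter are hypothetical and not part of this commit.

/**
 * example_port_flush - illustrate the kernel-doc comment layout
 * @ap: port to flush (hypothetical parameter)
 *
 * One summary line, one line per parameter, then a free-form
 * description. The templates above extract comments written in
 * exactly this shape from libata-core.c and libata-scsi.c.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void example_port_flush(struct ata_port *ap)
{
	/* body intentionally empty; only the comment format matters here */
}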
@@ -797,7 +797,9 @@ static struct pci_device_id piix_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_11,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15},
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16},
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_10,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17},
#ifndef CONFIG_SCSI_SATA
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18},
#endif /* !CONFIG_SCSI_SATA */
{ 0, },
};
...
@@ -748,6 +748,60 @@ static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
sis_96x_compatible = 1;
}
#ifdef CONFIG_SCSI_SATA
static void __init quirk_intel_ide_combined(struct pci_dev *pdev)
{
u8 prog, comb, tmp;
/*
* Narrow down to Intel SATA PCI devices.
*/
switch (pdev->device) {
/* PCI ids taken from drivers/scsi/ata_piix.c */
case 0x24d1:
case 0x24df:
case 0x25a3:
case 0x25b0:
break;
default:
/* we do not handle this PCI device */
return;
}
/*
* Read combined mode register.
*/
pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */
tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */
if (tmp == 0x4) /* bits 10x */
comb = (1 << 0); /* SATA port 0, PATA port 1 */
else if (tmp == 0x6) /* bits 11x */
comb = (1 << 2); /* PATA port 0, SATA port 1 */
else
return; /* not in combined mode */
/*
* Read programming interface register.
* (Tells us if it's legacy or native mode)
*/
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
/* if SATA port is in native mode, we're ok. */
if (prog & comb)
return;
/* SATA port is in legacy mode. Reserve port so that
* IDE driver does not attempt to use it. If request_region
* fails, it will be obvious at boot time, so we don't bother
* checking return values.
*/
if (comb == (1 << 0))
request_region(0x1f0, 8, "libata"); /* port 0 */
else
request_region(0x170, 8, "libata"); /* port 1 */
}
#endif /* CONFIG_SCSI_SATA */
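The point of the request_region() calls above is that any legacy IDE driver probing the same I/O range later will find it busy and back off. A minimal sketch of that interaction, with a hypothetical probe function that is not part of this commit:

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

/* Hypothetical legacy-driver probe: the range reserved by the quirk
 * above is already claimed, so request_region() fails and the probe
 * skips the port instead of fighting libata for it. */
static int example_legacy_ide_probe(void)
{
	if (!request_region(0x1f0, 8, "ide0")) {
		printk(KERN_INFO "ide0: 0x1f0-0x1f7 already claimed, skipping\n");
		return -EBUSY;
	}
	/* a real legacy probe would continue here */
	release_region(0x1f0, 8);
	return 0;
}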
/*
* The main table of quirks.
*
@@ -851,6 +905,14 @@ static struct pci_fixup pci_fixups[] __devinitdata = {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc },
#ifdef CONFIG_SCSI_SATA
/* Work around BIOSes that route a Parallel ATA (PATA / IDE) channel
* through the Serial ATA (SATA) controller's PCI device (combined mode).
*/
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
quirk_intel_ide_combined },
#endif /* CONFIG_SCSI_SATA */
{ 0 }
};
...
@@ -403,6 +403,58 @@ config SCSI_MEGARAID
To compile this driver as a module, choose M here: the
module will be called megaraid.
config SCSI_SATA
bool "Serial ATA (SATA) support"
depends on SCSI && EXPERIMENTAL
help
This driver family supports Serial ATA host controllers
and devices.
If unsure, say N.
config SCSI_SATA_SVW
tristate "ServerWorks Frodo / Apple K2 SATA support (EXPERIMENTAL)"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for Broadcom/Serverworks/Apple K2
SATA support.
If unsure, say N.
config SCSI_ATA_PIIX
tristate "Intel PIIX/ICH SATA support"
depends on SCSI_SATA && PCI
help
This option enables support for ICH5 Serial ATA.
If PATA support was enabled previously, this enables
support for select Intel PIIX/ICH PATA host controllers.
If unsure, say N.
config SCSI_SATA_PROMISE
tristate "Promise SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for Promise Serial ATA.
If unsure, say N.
config SCSI_SATA_SIL
tristate "Silicon Image SATA support"
depends on SCSI_SATA && PCI && BROKEN
help
This option enables support for Silicon Image Serial ATA.
If unsure, say N.
config SCSI_SATA_VIA
tristate "VIA SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for VIA Serial ATA.
If unsure, say N.
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
depends on (PCI || ISA) && SCSI
...
@@ -112,6 +112,11 @@ obj-$(CONFIG_SCSI_FCAL) += fcal.o
obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o
obj-$(CONFIG_SCSI_LASI700) += lasi700.o 53c700.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o
obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o
obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o
obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o
obj-$(CONFIG_ARM) += arm/
@@ -146,6 +151,7 @@ zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
cpqfc-objs := cpqfcTSinit.o cpqfcTScontrol.o cpqfcTSi2c.o \
cpqfcTSworker.o cpqfcTStrigger.o
libata-objs := libata-core.o libata-scsi.o
# Files generated that shall be removed upon make clean
clean-files := 53c7xx_d.h 53c700_d.h \
...
/*
ata_piix.c - Intel PATA/SATA controllers
Copyright 2003 Red Hat Inc
Copyright 2003 Jeff Garzik
Copyright header from piix.c:
Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
May be copied or modified under the terms of the GNU General Public License
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "0.95"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
ICH5_PCS = 0x92, /* port control and status */
PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */
PIIX_COMB_PRI = (1 << 0), /* combined mode, PATA primary */
PIIX_COMB_SEC = (1 << 1), /* combined mode, PATA secondary */
PIIX_80C_PRI = (1 << 5) | (1 << 4),
PIIX_80C_SEC = (1 << 7) | (1 << 6),
ich5_pata = 0,
ich5_sata = 1,
piix4_pata = 2,
};
static int piix_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent);
static void piix_pata_phy_reset(struct ata_port *ap);
static void piix_sata_phy_reset(struct ata_port *ap);
static void piix_sata_port_disable(struct ata_port *ap);
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio);
static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma);
static unsigned int in_module_init = 1;
static struct pci_device_id piix_pci_tbl[] = {
#ifdef ATA_ENABLE_PATA
{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
#endif
{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
{ } /* terminate list */
};
static struct pci_driver piix_pci_driver = {
.name = DRV_NAME,
.id_table = piix_pci_tbl,
.probe = piix_init_one,
.remove = ata_pci_remove_one,
};
static Scsi_Host_Template piix_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = ata_scsi_queuecmd,
.eh_strategy_handler = ata_scsi_error,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = ATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
};
static struct ata_port_operations piix_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = piix_set_piomode,
.set_udmamode = piix_set_udmamode,
.tf_load = ata_tf_load_pio,
.tf_read = ata_tf_read_pio,
.check_status = ata_check_status_pio,
.exec_command = ata_exec_command_pio,
.phy_reset = piix_pata_phy_reset,
.phy_config = pata_phy_config,
.bmdma_start = ata_bmdma_start_pio,
.fill_sg = ata_fill_sg,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
};
static struct ata_port_operations piix_sata_ops = {
.port_disable = piix_sata_port_disable,
.set_piomode = piix_set_piomode,
.set_udmamode = piix_set_udmamode,
.tf_load = ata_tf_load_pio,
.tf_read = ata_tf_read_pio,
.check_status = ata_check_status_pio,
.exec_command = ata_exec_command_pio,
.phy_reset = piix_sata_phy_reset,
.phy_config = pata_phy_config, /* not a typo */
.bmdma_start = ata_bmdma_start_pio,
.fill_sg = ata_fill_sg,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
};
static struct ata_port_info piix_port_info[] = {
/* ich5_pata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */
.port_ops = &piix_pata_ops,
},
/* ich5_sata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
ATA_FLAG_SRST,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &piix_sata_ops,
},
/* piix4_pata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */
.port_ops = &piix_pata_ops,
},
};
static struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
/**
* piix_pata_cbl_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
*
* Read 80c cable indicator from SATA PCI device's PCI config
* register. This register is normally set by firmware (BIOS).
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_pata_cbl_detect(struct ata_port *ap)
{
struct pci_dev *pdev = ap->host_set->pdev;
u8 tmp, mask;
/* no 80c support in host controller? */
if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
goto cbl40;
/* check BIOS cable detect results */
mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
if ((tmp & mask) == 0)
goto cbl40;
ap->cbl = ATA_CBL_PATA80;
return;
cbl40:
ap->cbl = ATA_CBL_PATA40;
ap->udma_mask &= ATA_UDMA_MASK_40C;
}
/**
* piix_pata_phy_reset - Probe specified port on PATA host controller
* @ap: Port to probe
*
* Probe PATA phy.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_pata_phy_reset(struct ata_port *ap)
{
if (!pci_test_config_bits(ap->host_set->pdev,
&piix_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return;
}
piix_pata_cbl_detect(ap);
ata_port_probe(ap);
ata_bus_reset(ap);
}
/**
* piix_pcs_probe - Probe SATA port configuration and status register
* @ap: Port to probe
* @have_port: (output) Non-zero if SATA port is enabled
* @have_device: (output) Non-zero if SATA phy indicates device present
*
* Reads SATA PCI device's PCI config register Port Configuration
* and Status (PCS) to determine port and device availability.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_pcs_probe (struct ata_port *ap, unsigned int *have_port,
unsigned int *have_device)
{
struct pci_dev *pdev = ap->host_set->pdev;
u16 pcs;
pci_read_config_word(pdev, ICH5_PCS, &pcs);
/* is SATA port enabled? */
if (pcs & (1 << ap->port_no)) {
*have_port = 1;
if (pcs & (1 << (ap->port_no + 4)))
*have_device = 1;
}
}
/**
* piix_pcs_disable - Disable SATA port
* @ap: Port to disable
*
* Disable SATA phy for specified port.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_pcs_disable (struct ata_port *ap)
{
struct pci_dev *pdev = ap->host_set->pdev;
u16 pcs;
pci_read_config_word(pdev, ICH5_PCS, &pcs);
if (pcs & (1 << ap->port_no)) {
pcs &= ~(1 << ap->port_no);
pci_write_config_word(pdev, ICH5_PCS, pcs);
}
}
/**
* piix_sata_phy_reset - Probe specified port on SATA host controller
* @ap: Port to probe
*
* Probe SATA phy.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_sata_phy_reset(struct ata_port *ap)
{
unsigned int have_port = 0, have_dev = 0;
if (!pci_test_config_bits(ap->host_set->pdev,
&piix_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return;
}
piix_pcs_probe(ap, &have_port, &have_dev);
/* if port not enabled, exit */
if (!have_port) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: SATA port disabled. ignoring.\n",
ap->id);
return;
}
/* if port enabled but no device, disable port and exit */
if (!have_dev) {
piix_sata_port_disable(ap);
printk(KERN_INFO "ata%u: SATA port has no device. disabling.\n",
ap->id);
return;
}
ap->cbl = ATA_CBL_SATA;
ata_port_probe(ap);
ata_bus_reset(ap);
}
/**
* piix_sata_port_disable - Disable SATA port
* @ap: Port to disable.
*
* Disable SATA port.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_sata_port_disable(struct ata_port *ap)
{
ata_port_disable(ap);
piix_pcs_disable(ap);
}
/**
* piix_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose PIO timings we are setting
* @pio: PIO mode, 0 - 4
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio)
{
struct pci_dev *dev = ap->host_set->pdev;
unsigned int is_slave = (adev->flags & ATA_DFLAG_MASTER) ? 0 : 1;
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
u16 master_data;
u8 slave_data;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
master_data |= 0x4000;
/* enable PPE, IE and TIME */
master_data |= 0x0070;
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
slave_data |=
(timings[pio][0] << 2) |
(timings[pio][1] << (ap->port_no ? 4 : 0));
} else {
master_data &= 0xccf8;
/* enable PPE, IE and TIME */
master_data |= 0x0007;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
}
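To make the register arithmetic above concrete, here is a small standalone sketch (hypothetical helper, not part of the driver) that computes the master timing word the same way piix_set_piomode() does for a master device; for PIO 4 it masks the old value with 0xccf8, then ORs in 0x0007 and 0x2300:

#include <linux/types.h>

/* Illustrative sketch of the master-device path in piix_set_piomode(). */
static u16 example_piix_pio_master_word(u16 old, unsigned int pio)
{
	static const u8 timings[][2] = {	/* ISP, RTC */
		{ 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 },
	};
	u16 v = old & 0xccf8;			/* clear old timing fields */
	v |= 0x0007;				/* enable PPE, IE and TIME */
	v |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
	return v;
}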
/**
* piix_set_udmamode - Initialize host controller PATA UDMA timings
* @ap: Port whose timings we are configuring
* @adev: Device whose UDMA timings we are setting
* @udma: udma mode, 0 - 6
*
* Set UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma)
{
struct pci_dev *dev = ap->host_set->pdev;
u8 maslave = ap->port_no ? 0x42 : 0x40;
u8 speed = udma;
unsigned int drive_dn = (ap->port_no ? 2 : 0) + adev->devno;
int a_speed = 3 << (drive_dn * 4);
int u_flag = 1 << drive_dn;
int v_flag = 0x01 << drive_dn;
int w_flag = 0x10 << drive_dn;
int u_speed = 0;
int sitre;
u16 reg4042, reg44, reg48, reg4a, reg54;
u8 reg55;
pci_read_config_word(dev, maslave, &reg4042);
DPRINTK("reg4042 = 0x%04x\n", reg4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
pci_read_config_word(dev, 0x44, &reg44);
pci_read_config_word(dev, 0x48, &reg48);
pci_read_config_word(dev, 0x4a, &reg4a);
pci_read_config_word(dev, 0x54, &reg54);
pci_read_config_byte(dev, 0x55, &reg55);
switch(speed) {
case XFER_UDMA_4:
case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
case XFER_UDMA_6:
case XFER_UDMA_5:
case XFER_UDMA_3:
case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
default:
BUG();
return;
}
if (!(reg48 & u_flag))
pci_write_config_word(dev, 0x48, reg48|u_flag);
if (speed == XFER_UDMA_5) {
pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
} else {
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
}
if (!(reg4a & u_speed)) {
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
pci_write_config_word(dev, 0x4a, reg4a|u_speed);
}
if (speed > XFER_UDMA_2) {
if (!(reg54 & v_flag)) {
pci_write_config_word(dev, 0x54, reg54|v_flag);
}
} else {
pci_write_config_word(dev, 0x54, reg54 & ~v_flag);
}
}
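A worked instance of the flag arithmetic above, expressed as a hypothetical helper that mirrors the code; for drive_dn 0 (primary master) it yields u_flag 0x01, a_speed 0x03, v_flag 0x01 and w_flag 0x10:

#include <linux/types.h>

/* Illustrative sketch only: the per-drive constants piix_set_udmamode()
 * derives for a given drive number, exactly as in the code above. */
static void example_piix_udma_flags(unsigned int drive_dn,
				    int *u_flag, int *a_speed,
				    int *v_flag, int *w_flag)
{
	*u_flag  = 1 << drive_dn;		/* reg 0x48: UDMA enable bit */
	*a_speed = 3 << (drive_dn * 4);		/* reg 0x4a: per-drive field */
	*v_flag  = 0x01 << drive_dn;		/* reg 0x54: modes > UDMA/33 */
	*w_flag  = 0x10 << drive_dn;		/* reg 0x55: UDMA/100 only   */
}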
/**
* piix_probe_combined - Determine if PATA and SATA are combined
* @pdev: PCI device to examine
* @mask: (output) zero, %PIIX_COMB_PRI or %PIIX_COMB_SEC
*
* Determine if BIOS has secretly stuffed a PATA port into our
* otherwise-beautiful SATA PCI device.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*/
static void piix_probe_combined (struct pci_dev *pdev, unsigned int *mask)
{
u8 tmp;
pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */
tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */
/* backwards from what one might expect */
if (tmp == 0x4) /* bits 10x */
*mask |= PIIX_COMB_SEC;
if (tmp == 0x6) /* bits 11x */
*mask |= PIIX_COMB_PRI;
}
/**
* piix_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in piix_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_port_info *port_info[2];
unsigned int combined = 0, n_ports = 1;
unsigned int pata_comb = 0, sata_comb = 0;
if (!printed_version++)
printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
/* no hotplugging support (FIXME) */
if (!in_module_init)
return -ENODEV;
port_info[0] = &piix_port_info[ent->driver_data];
port_info[1] = NULL;
if (port_info[0]->host_flags & PIIX_FLAG_COMBINED)
piix_probe_combined(pdev, &combined);
if (combined & PIIX_COMB_PRI)
sata_comb = 1;
else if (combined & PIIX_COMB_SEC)
pata_comb = 1;
if (pata_comb || sata_comb) {
port_info[sata_comb] = &piix_port_info[ent->driver_data];
port_info[sata_comb]->host_flags |= ATA_FLAG_SLAVE_POSS; /* sigh */
port_info[pata_comb] = &piix_port_info[ich5_pata]; /*ich5-specific*/
n_ports++;
printk(KERN_WARNING DRV_NAME ": combined mode detected\n");
}
return ata_pci_init_one(pdev, port_info, n_ports);
}
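A worked example of the combined-mode shuffle above, with the BIOS state assumed purely for illustration: if the combined-mode register reports PATA on the secondary channel, piix_probe_combined() sets PIIX_COMB_SEC, so pata_comb == 1 and sata_comb == 0, and piix_init_one() ends up registering:

/*
 * port_info[0] = the ich5_sata entry from piix_port_info
 *                (with ATA_FLAG_SLAVE_POSS added),
 * port_info[1] = the ich5_pata entry,
 * n_ports      = 2,
 *
 * so libata sees one SATA port and one PATA port behind the same
 * PCI function when ata_pci_init_one() runs.
 */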
/**
* piix_init - register the ata_piix PCI driver
*
* LOCKING:
*
* RETURNS:
*
*/
static int __init piix_init(void)
{
int rc;
DPRINTK("pci_module_init\n");
rc = pci_module_init(&piix_pci_driver);
if (rc)
return rc;
in_module_init = 0;
DPRINTK("done\n");
return 0;
}
/**
* piix_exit - unregister the ata_piix PCI driver
*
* LOCKING:
*
*/
static void __exit piix_exit(void)
{
pci_unregister_driver(&piix_pci_driver);
}
module_init(piix_init);
module_exit(piix_exit);
/*
libata-core.c - helper library for ATA
Copyright 2003 Red Hat, Inc. All rights reserved.
Copyright 2003 Jeff Garzik
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
http://www.opensource.org/licenses/osl-1.1.txt and is included herein
by reference.
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include "libata.h"
static void atapi_cdb_send(struct ata_port *ap);
static unsigned int ata_busy_sleep (struct ata_port *ap,
unsigned long tmout_pat,
unsigned long tmout);
static void __ata_dev_select (struct ata_port *ap, unsigned int device);
static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append);
static void ata_dma_complete(struct ata_port *ap, u8 host_stat,
unsigned int done_late);
static void ata_host_set_pio(struct ata_port *ap);
static void ata_host_set_udma(struct ata_port *ap);
static void ata_dev_set_pio(struct ata_port *ap, unsigned int device);
static void ata_dev_set_udma(struct ata_port *ap, unsigned int device);
static unsigned int ata_unique_id = 1;
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
static const char * thr_state_name[] = {
"THR_UNKNOWN",
"THR_PORT_RESET",
"THR_AWAIT_DEATH",
"THR_PROBE_FAILED",
"THR_IDLE",
"THR_PROBE_SUCCESS",
"THR_PROBE_START",
"THR_PIO_POLL",
"THR_PIO_TMOUT",
"THR_PIO",
"THR_PIO_LAST",
"THR_PIO_LAST_POLL",
"THR_PIO_ERR",
"THR_PACKET",
};
/**
* ata_thr_state_name - convert thread state enum to string
* @thr_state: thread state to be converted to string
*
* Converts the specified thread state id to a constant C string.
*
* LOCKING:
* None.
*
* RETURNS:
* The THR_xxx-prefixed string naming the specified thread
* state id, or the string "<invalid THR_xxx state>".
*/
static const char *ata_thr_state_name(unsigned int thr_state)
{
if (thr_state < ARRAY_SIZE(thr_state_name))
return thr_state_name[thr_state];
return "<invalid THR_xxx state>";
}
/**
* msleep - sleep for a number of milliseconds
* @msecs: number of milliseconds to sleep
*
* Issues schedule_timeout call for the specified number
* of milliseconds.
*
* LOCKING:
* None.
*/
static void msleep(unsigned long msecs)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(msecs));
}
/**
* ata_tf_load_pio - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
* Outputs ATA taskfile to standard ATA host controller using PIO.
*
* LOCKING:
* Inherited from caller.
*/
void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
outb(tf->ctl, ioaddr->ctl_addr);
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
outb(tf->hob_feature, ioaddr->error_addr);
outb(tf->hob_nsect, ioaddr->nsect_addr);
outb(tf->hob_lbal, ioaddr->lbal_addr);
outb(tf->hob_lbam, ioaddr->lbam_addr);
outb(tf->hob_lbah, ioaddr->lbah_addr);
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
tf->hob_feature,
tf->hob_nsect,
tf->hob_lbal,
tf->hob_lbam,
tf->hob_lbah);
}
if (is_addr) {
outb(tf->feature, ioaddr->error_addr);
outb(tf->nsect, ioaddr->nsect_addr);
outb(tf->lbal, ioaddr->lbal_addr);
outb(tf->lbam, ioaddr->lbam_addr);
outb(tf->lbah, ioaddr->lbah_addr);
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
tf->feature,
tf->nsect,
tf->lbal,
tf->lbam,
tf->lbah);
}
if (tf->flags & ATA_TFLAG_DEVICE) {
outb(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
ata_wait_idle(ap);
}
/**
* ata_tf_load_mmio - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
* Outputs ATA taskfile to standard ATA host controller using MMIO.
*
* LOCKING:
* Inherited from caller.
*/
void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
writeb(tf->ctl, ap->ioaddr.ctl_addr);
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
writeb(tf->hob_feature, (void *) ioaddr->error_addr);
writeb(tf->hob_nsect, (void *) ioaddr->nsect_addr);
writeb(tf->hob_lbal, (void *) ioaddr->lbal_addr);
writeb(tf->hob_lbam, (void *) ioaddr->lbam_addr);
writeb(tf->hob_lbah, (void *) ioaddr->lbah_addr);
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
tf->hob_feature,
tf->hob_nsect,
tf->hob_lbal,
tf->hob_lbam,
tf->hob_lbah);
}
if (is_addr) {
writeb(tf->feature, (void *) ioaddr->error_addr);
writeb(tf->nsect, (void *) ioaddr->nsect_addr);
writeb(tf->lbal, (void *) ioaddr->lbal_addr);
writeb(tf->lbam, (void *) ioaddr->lbam_addr);
writeb(tf->lbah, (void *) ioaddr->lbah_addr);
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
tf->feature,
tf->nsect,
tf->lbal,
tf->lbam,
tf->lbah);
}
if (tf->flags & ATA_TFLAG_DEVICE) {
writeb(tf->device, (void *) ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
ata_wait_idle(ap);
}
/**
* ata_exec_command_pio - issue ATA command to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues PIO write to ATA command register, with proper
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
outb(tf->command, ap->ioaddr.cmdstat_addr);
ata_pause(ap);
}
/**
* ata_exec_command_mmio - issue ATA command to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues MMIO write to ATA command register, with proper
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
writeb(tf->command, (void *) ap->ioaddr.cmdstat_addr);
ata_pause(ap);
}
/**
* ata_exec - issue ATA command to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues PIO write to ATA command register, with proper
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* Obtains host_set lock.
*/
static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
{
unsigned long flags;
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
spin_lock_irqsave(&ap->host_set->lock, flags);
ap->ops->exec_command(ap, tf);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
/**
* ata_tf_to_host - issue ATA taskfile to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues ATA taskfile register set to ATA host controller,
* via PIO, with proper synchronization with interrupt handler and
* other threads.
*
* LOCKING:
* Obtains host_set lock.
*/
static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
{
init_MUTEX_LOCKED(&ap->sem);
ap->ops->tf_load(ap, tf);
ata_exec(ap, tf);
}
/**
* ata_tf_to_host_nolock - issue ATA taskfile to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues ATA taskfile register set to ATA host controller,
* via PIO, with proper synchronization with interrupt handler and
* other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
{
init_MUTEX_LOCKED(&ap->sem);
ap->ops->tf_load(ap, tf);
ap->ops->exec_command(ap, tf);
}
/**
* ata_tf_read_pio - input device's ATA taskfile shadow registers
* @ap: Port from which input is read
* @tf: ATA taskfile register set for storing input
*
* Reads ATA taskfile registers for currently-selected device
* into @tf via PIO.
*
* LOCKING:
* Inherited from caller.
*/
void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
tf->nsect = inb(ioaddr->nsect_addr);
tf->lbal = inb(ioaddr->lbal_addr);
tf->lbam = inb(ioaddr->lbam_addr);
tf->lbah = inb(ioaddr->lbah_addr);
tf->device = inb(ioaddr->device_addr);
if (tf->flags & ATA_TFLAG_LBA48) {
outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
tf->hob_feature = inb(ioaddr->error_addr);
tf->hob_nsect = inb(ioaddr->nsect_addr);
tf->hob_lbal = inb(ioaddr->lbal_addr);
tf->hob_lbam = inb(ioaddr->lbam_addr);
tf->hob_lbah = inb(ioaddr->lbah_addr);
}
}
/**
* ata_tf_read_mmio - input device's ATA taskfile shadow registers
* @ap: Port from which input is read
* @tf: ATA taskfile register set for storing input
*
* Reads ATA taskfile registers for currently-selected device
* into @tf via MMIO.
*
* LOCKING:
* Inherited from caller.
*/
void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
tf->nsect = readb((void *)ioaddr->nsect_addr);
tf->lbal = readb((void *)ioaddr->lbal_addr);
tf->lbam = readb((void *)ioaddr->lbam_addr);
tf->lbah = readb((void *)ioaddr->lbah_addr);
tf->device = readb((void *)ioaddr->device_addr);
if (tf->flags & ATA_TFLAG_LBA48) {
writeb(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
tf->hob_feature = readb((void *)ioaddr->error_addr);
tf->hob_nsect = readb((void *)ioaddr->nsect_addr);
tf->hob_lbal = readb((void *)ioaddr->lbal_addr);
tf->hob_lbam = readb((void *)ioaddr->lbam_addr);
tf->hob_lbah = readb((void *)ioaddr->lbah_addr);
}
}
/**
* ata_check_status_pio - Read device status reg & clear interrupt
* @ap: port where the device is
*
* Reads ATA taskfile status register for currently-selected device
* via PIO and returns its value. This also clears pending interrupts
* from this device.
*
* LOCKING:
* Inherited from caller.
*/
u8 ata_check_status_pio(struct ata_port *ap)
{
return inb(ap->ioaddr.cmdstat_addr);
}
/**
* ata_check_status_mmio - Read device status reg & clear interrupt
* @ap: port where the device is
*
* Reads ATA taskfile status register for currently-selected device
* via MMIO and returns its value. This also clears pending interrupts
* from this device.
*
* LOCKING:
* Inherited from caller.
*/
u8 ata_check_status_mmio(struct ata_port *ap)
{
return readb((void *) ap->ioaddr.cmdstat_addr);
}
static const char * udma_str[] = {
"UDMA/16",
"UDMA/25",
"UDMA/33",
"UDMA/44",
"UDMA/66",
"UDMA/100",
"UDMA/133",
"UDMA7",
};
/**
* ata_udma_string - convert UDMA bit offset to string
* @udma_mask: mask of bits supported; only highest bit counts.
*
* Determine string which represents the highest speed
* (highest bit in @udma_mask).
*
* LOCKING:
* None.
*
* RETURNS:
* Constant C string representing highest speed listed in
* @udma_mask, or the constant C string "<n/a>".
*/
static const char *ata_udma_string(unsigned int udma_mask)
{
int i;
for (i = 7; i >= 0; i--) {
if (udma_mask & (1 << i))
return udma_str[i];
}
return "<n/a>";
}
/**
* ata_pio_devchk - PIO-based device presence detection
* @ap: port on which the device may reside
* @device: device (0 or 1) to check
*
* Writes a test pattern to the nsect and lbal taskfile registers
* and reads it back; a present device latches the pattern.
*
* LOCKING:
*
*/
static unsigned int ata_pio_devchk(struct ata_port *ap,
unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
__ata_dev_select(ap, device);
outb(0x55, ioaddr->nsect_addr);
outb(0xaa, ioaddr->lbal_addr);
outb(0xaa, ioaddr->nsect_addr);
outb(0x55, ioaddr->lbal_addr);
outb(0x55, ioaddr->nsect_addr);
outb(0xaa, ioaddr->lbal_addr);
nsect = inb(ioaddr->nsect_addr);
lbal = inb(ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
return 1; /* we found a device */
return 0; /* nothing found */
}
/**
* ata_mmio_devchk - MMIO-based device presence detection
* @ap: port on which the device may reside
* @device: device (0 or 1) to check
*
* Same test-pattern check as ata_pio_devchk(), using MMIO accessors.
*
* LOCKING:
*
*/
static unsigned int ata_mmio_devchk(struct ata_port *ap,
unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
__ata_dev_select(ap, device);
writeb(0x55, (void *) ioaddr->nsect_addr);
writeb(0xaa, (void *) ioaddr->lbal_addr);
writeb(0xaa, (void *) ioaddr->nsect_addr);
writeb(0x55, (void *) ioaddr->lbal_addr);
writeb(0x55, (void *) ioaddr->nsect_addr);
writeb(0xaa, (void *) ioaddr->lbal_addr);
nsect = readb((void *) ioaddr->nsect_addr);
lbal = readb((void *) ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
return 1; /* we found a device */
return 0; /* nothing found */
}
/**
* ata_dev_devchk - device presence detection, PIO or MMIO
* @ap: port on which the device may reside
* @device: device (0 or 1) to check
*
* Dispatches to ata_mmio_devchk() or ata_pio_devchk() depending on
* whether the port uses MMIO.
*
* LOCKING:
*
*/
static unsigned int ata_dev_devchk(struct ata_port *ap,
unsigned int device)
{
if (ap->flags & ATA_FLAG_MMIO)
return ata_mmio_devchk(ap, device);
return ata_pio_devchk(ap, device);
}
/**
* ata_dev_classify - determine device type based on ATA-spec signature
* @tf: ATA taskfile register set for device to be identified
*
* Determine from taskfile register contents whether a device is
* ATA or ATAPI, as per "Signature and persistence" section
* of ATA/PI spec (volume 1, sect 5.14).
*
* LOCKING:
* None.
*
* RETURNS:
* Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
* in the event of failure.
*/
static unsigned int ata_dev_classify(struct ata_taskfile *tf)
{
/* Apple's open source Darwin code hints that some devices only
* put a proper signature into the LBA mid/high registers,
* so we only check those. It's sufficient for uniqueness.
*/
if (((tf->lbam == 0) && (tf->lbah == 0)) ||
((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
DPRINTK("found ATA device by sig\n");
return ATA_DEV_ATA;
}
if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
DPRINTK("found ATAPI device by sig\n");
return ATA_DEV_ATAPI;
}
DPRINTK("unknown device\n");
return ATA_DEV_UNKNOWN;
}
/**
* ata_dev_try_classify - read reset signature and classify a device
* @ap: port on which the device resides
* @device: device (0 or 1) to classify
* @maybe_have_dev: non-zero if earlier probing saw a device (unused here)
*
* Reads the error register and the taskfile signature left by the
* bus reset and sets the device's class accordingly; returns the
* error register contents.
*
* LOCKING:
*
*/
static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device,
unsigned int maybe_have_dev)
{
struct ata_device *dev = &ap->device[device];
struct ata_taskfile tf;
unsigned int class;
u8 err;
__ata_dev_select(ap, device);
memset(&tf, 0, sizeof(tf));
err = ata_chk_err(ap);
ap->ops->tf_read(ap, &tf);
dev->class = ATA_DEV_NONE;
/* see if device passed diags */
if (err == 1)
/* do nothing */ ;
else if ((device == 0) && (err == 0x81))
/* do nothing */ ;
else
return err;
/* determine if device is ATA or ATAPI */
class = ata_dev_classify(&tf);
if (class == ATA_DEV_UNKNOWN)
return err;
if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
return err;
dev->class = class;
return err;
}
/**
* ata_dev_id_string - copy a string out of the IDENTIFY DEVICE page
* @dev: device whose ID page holds the string
* @s: output buffer
* @ofs: word offset of the string within the ID page
* @len: number of bytes to copy (must be even)
*
* IDENTIFY strings keep the first character of each 16-bit word in
* the high byte; this copies @len bytes into @s in readable order.
*
* LOCKING:
*
* RETURNS:
*
*/
unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned int c, ret = 0;
while (len > 0) {
c = dev->id[ofs] >> 8;
*s = c;
s++;
ret = c = dev->id[ofs] & 0xff;
*s = c;
s++;
ofs++;
len -= 2;
}
return ret;
}
/**
* ata_dev_parse_strings - fill in vendor/product strings from ID page
* @dev: ATA device whose IDENTIFY page has been read
*
* LOCKING:
*/
static void ata_dev_parse_strings(struct ata_device *dev)
{
assert (dev->class == ATA_DEV_ATA);
memcpy(dev->vendor, "ATA ", 8);
ata_dev_id_string(dev, dev->product, ATA_ID_PROD_OFS,
sizeof(dev->product));
}
/**
* __ata_dev_select - write the device register to select a device
* @ap: port containing the device
* @device: device (0 or 1) to select
*
* LOCKING:
*
*/
static void __ata_dev_select (struct ata_port *ap, unsigned int device)
{
u8 tmp;
if (device == 0)
tmp = ATA_DEVICE_OBS;
else
tmp = ATA_DEVICE_OBS | ATA_DEV1;
if (ap->flags & ATA_FLAG_MMIO) {
writeb(tmp, (void *) ap->ioaddr.device_addr);
} else {
outb(tmp, ap->ioaddr.device_addr);
}
ata_pause(ap); /* needed; also flushes, for mmio */
}
/**
* ata_dev_select - select a device, optionally waiting for idle
* @ap: port containing the device
* @device: device (0 or 1) to select
* @wait: non-zero to wait for the channel to go idle before and after
* @can_sleep: non-zero if the caller may sleep (allows an ATAPI settle delay)
*
* LOCKING:
*
* RETURNS:
*
*/
void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
VPRINTK("ENTER, ata%u: device %u, wait %u\n",
ap->id, device, wait);
if (wait)
ata_wait_idle(ap);
__ata_dev_select(ap, device);
if (wait) {
if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
msleep(150);
ata_wait_idle(ap);
}
}
/**
* ata_dump_id - dump selected IDENTIFY DEVICE words to the debug log
* @dev: device whose ID page is dumped
*
* LOCKING:
*/
static inline void ata_dump_id(struct ata_device *dev)
{
DPRINTK("49==0x%04x "
"53==0x%04x "
"63==0x%04x "
"64==0x%04x "
"75==0x%04x \n",
dev->id[49],
dev->id[53],
dev->id[63],
dev->id[64],
dev->id[75]);
DPRINTK("80==0x%04x "
"81==0x%04x "
"82==0x%04x "
"83==0x%04x "
"84==0x%04x \n",
dev->id[80],
dev->id[81],
dev->id[82],
dev->id[83],
dev->id[84]);
DPRINTK("88==0x%04x "
"93==0x%04x\n",
dev->id[88],
dev->id[93]);
}
/**
* ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
* @ap: port on which device we wish to probe resides
* @device: device bus address, starting at zero
*
* Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
* command, and read back the 512-byte device information page.
* The device information page is fed to us via the standard
* PIO-IN protocol, but we hand-code it here. (TODO: investigate
* using standard PIO-IN paths)
*
* After reading the device information page, we use several
* bits of information from it to initialize data structures
* that will be used during the lifetime of the ata_device.
* Other data from the info page is used to disqualify certain
* older ATA devices we do not wish to support.
*
* LOCKING:
* Inherited from caller. Some functions called by this function
* obtain the host_set lock.
*/
static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
struct ata_device *dev = &ap->device[device];
unsigned int i;
u16 tmp, udma_modes;
u8 status;
struct ata_taskfile tf;
unsigned int using_edd;
if (!ata_dev_present(dev)) {
DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
ap->id, device);
return;
}
if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
using_edd = 0;
else
using_edd = 1;
DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
dev->class == ATA_DEV_NONE);
ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
retry:
ata_tf_init(ap, &tf, device);
tf.ctl |= ATA_NIEN;
tf.protocol = ATA_PROT_PIO_READ;
if (dev->class == ATA_DEV_ATA) {
tf.command = ATA_CMD_ID_ATA;
DPRINTK("do ATA identify\n");
} else {
tf.command = ATA_CMD_ID_ATAPI;
DPRINTK("do ATAPI identify\n");
}
ata_tf_to_host(ap, &tf);
/* crazy ATAPI devices... */
if (dev->class == ATA_DEV_ATAPI)
msleep(150);
if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
goto err_out;
status = ata_chk_status(ap);
if (status & ATA_ERR) {
/*
* arg! EDD works for all test cases, but seems to return
* the ATA signature for some ATAPI devices. Until the
* reason for this is found and fixed, we fix up the mess
* here. If IDENTIFY DEVICE returns command aborted
* (as ATAPI devices do), then we issue an
* IDENTIFY PACKET DEVICE.
*
* ATA software reset (SRST, the default) does not appear
* to have this problem.
*/
if ((using_edd) && (tf.command == ATA_CMD_ID_ATA)) {
u8 err = ata_chk_err(ap);
if (err & ATA_ABORTED) {
dev->class = ATA_DEV_ATAPI;
goto retry;
}
}
goto err_out;
}
/* make sure we have BSY=0, DRQ=1 */
if ((status & ATA_DRQ) == 0) {
printk(KERN_WARNING "ata%u: dev %u (ATA%s?) not returning id page (0x%x)\n",
ap->id, device,
dev->class == ATA_DEV_ATA ? "" : "PI",
status);
goto err_out;
}
/* read IDENTIFY [X] DEVICE page */
if (ap->flags & ATA_FLAG_MMIO) {
for (i = 0; i < ATA_ID_WORDS; i++)
dev->id[i] = readw((void *)ap->ioaddr.data_addr);
} else
for (i = 0; i < ATA_ID_WORDS; i++)
dev->id[i] = inw(ap->ioaddr.data_addr);
/* wait for host_idle */
status = ata_wait_idle(ap);
if (status & (ATA_BUSY | ATA_DRQ)) {
printk(KERN_WARNING "ata%u: dev %u (ATA%s?) error after id page (0x%x)\n",
ap->id, device,
dev->class == ATA_DEV_ATA ? "" : "PI",
status);
goto err_out;
}
ata_irq_on(ap); /* re-enable interrupts */
/* print device capabilities */
printk(KERN_DEBUG "ata%u: dev %u cfg "
"49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
ap->id, device, dev->id[49],
dev->id[82], dev->id[83], dev->id[84],
dev->id[85], dev->id[86], dev->id[87],
dev->id[88]);
/*
* common ATA, ATAPI feature tests
*/
/* we require LBA and DMA support (bits 8 & 9 of word 49) */
if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) {
printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
goto err_out_nosup;
}
/* we require UDMA support */
udma_modes =
tmp = dev->id[ATA_ID_UDMA_MODES];
if ((tmp & 0xff) == 0) {
printk(KERN_DEBUG "ata%u: no udma\n", ap->id);
goto err_out_nosup;
}
ata_dump_id(dev);
ata_dev_parse_strings(dev);
/* ATA-specific feature tests */
if (dev->class == ATA_DEV_ATA) {
if (!ata_id_is_ata(dev)) /* sanity check */
goto err_out_nosup;
tmp = dev->id[ATA_ID_MAJOR_VER];
for (i = 14; i >= 1; i--)
if (tmp & (1 << i))
break;
/* we require at least ATA-3 */
if (i < 3) {
printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
goto err_out_nosup;
}
if (ata_id_has_lba48(dev)) {
dev->flags |= ATA_DFLAG_LBA48;
dev->n_sectors = ata_id_u64(dev, 100);
} else {
dev->n_sectors = ata_id_u32(dev, 60);
}
ap->host->max_cmd_len = 16;
/* print device info to dmesg */
printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors%s\n",
ap->id, device,
ata_udma_string(udma_modes),
dev->n_sectors,
dev->flags & ATA_DFLAG_LBA48 ? " (lba48)" : "");
}
/* ATAPI-specific feature tests */
else {
if (ata_id_is_ata(dev)) /* sanity check */
goto err_out_nosup;
/* see if 16-byte commands supported */
tmp = dev->id[0] & 0x3;
if (tmp == 1)
ap->host->max_cmd_len = 16;
/* print device info to dmesg */
printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
ap->id, device,
ata_udma_string(udma_modes));
}
DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
return;
err_out_nosup:
printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
ap->id, device);
err_out:
ata_irq_on(ap); /* re-enable interrupts */
dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
DPRINTK("EXIT, err\n");
}
/**
* ata_port_reset - reset a port and probe its attached devices
* @ap: port to reset and probe
*
* Resets the phy, identifies the devices found and configures their
* transfer modes; on failure the probe is marked failed.
*
* LOCKING:
*/
static void ata_port_reset(struct ata_port *ap)
{
unsigned int i, found = 0;
ap->ops->phy_reset(ap);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
goto err_out;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
ata_dev_identify(ap, i);
if (ata_dev_present(&ap->device[i])) {
found = 1;
if (ap->ops->dev_config)
ap->ops->dev_config(ap, &ap->device[i]);
}
}
if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
goto err_out_disable;
ap->ops->phy_config(ap);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
goto err_out_disable;
ap->thr_state = THR_PROBE_SUCCESS;
return;
err_out_disable:
ap->ops->port_disable(ap);
err_out:
ap->thr_state = THR_PROBE_FAILED;
}
/**
* ata_port_probe - mark a port as enabled for probing
* @ap: port to mark
*
* Clears ATA_FLAG_PORT_DISABLED so the port will be probed.
*
* LOCKING:
*/
void ata_port_probe(struct ata_port *ap)
{
ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
* sata_phy_reset - wake/reset the SATA phy and probe for a device
* @ap: port whose phy is reset
*
* Issues a phy wake/reset through SCR_CONTROL, waits for the phy to
* report ready, then probes the port, or disables it if no device
* responds.
*
* LOCKING:
*
*/
void sata_phy_reset(struct ata_port *ap)
{
u32 sstatus;
unsigned long timeout = jiffies + (HZ * 5);
scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
scr_read(ap, SCR_CONTROL); /* dummy read; flush */
udelay(400); /* FIXME: a guess */
scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/reset */
/* wait for phy to become ready, if necessary */
do {
msleep(200);
sstatus = scr_read(ap, SCR_STATUS);
if ((sstatus & 0xf) != 1)
break;
} while (time_before(jiffies, timeout));
/* TODO: phy layer with polling, timeouts, etc. */
if (sata_dev_present(ap))
ata_port_probe(ap);
else {
sstatus = scr_read(ap, SCR_STATUS);
printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
ap->id, sstatus);
ata_port_disable(ap);
}
if (ap->flags & ATA_FLAG_PORT_DISABLED)
return;
ata_bus_reset(ap);
}
/**
* ata_port_disable - disable a port and forget its devices
* @ap: port to disable
*
* Marks both devices absent and sets ATA_FLAG_PORT_DISABLED.
*
* LOCKING:
*/
void ata_port_disable(struct ata_port *ap)
{
ap->device[0].class = ATA_DEV_NONE;
ap->device[1].class = ATA_DEV_NONE;
ap->flags |= ATA_FLAG_PORT_DISABLED;
}
/**
* pata_phy_config - program transfer modes on a PATA port
* @ap: port to configure
*
* Sets up host PIO and UDMA timings, then programs the devices for
* either PIO or UDMA operation, depending on ATA_FORCE_PIO.
*
* LOCKING:
*
*/
void pata_phy_config(struct ata_port *ap)
{
unsigned int force_pio;
ata_host_set_pio(ap);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
return;
ata_host_set_udma(ap);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
return;
#ifdef ATA_FORCE_PIO
force_pio = 1;
#else
force_pio = 0;
#endif
if (force_pio) {
ata_dev_set_pio(ap, 0);
ata_dev_set_pio(ap, 1);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
return;
} else {
ata_dev_set_udma(ap, 0);
ata_dev_set_udma(ap, 1);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
return;
}
}
/**
* ata_busy_sleep - sleep until BSY clears, or timeout
* @ap: port containing status register to be polled
* @tmout_pat: impatience timeout
* @tmout: overall timeout
*
* LOCKING:
*
* RETURNS:
* Zero once BSY clears, non-zero if the port is still busy after
* @tmout.
*
*/
static unsigned int ata_busy_sleep (struct ata_port *ap,
unsigned long tmout_pat,
unsigned long tmout)
{
unsigned long timer_start, timeout;
u8 status;
status = ata_busy_wait(ap, ATA_BUSY, 300);
timer_start = jiffies;
timeout = timer_start + tmout_pat;
while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
msleep(50);
status = ata_busy_wait(ap, ATA_BUSY, 3);
}
if (status & ATA_BUSY)
printk(KERN_WARNING "ata%u is slow to respond, "
"please be patient\n", ap->id);
timeout = timer_start + tmout;
while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
msleep(50);
status = ata_chk_status(ap);
}
if (status & ATA_BUSY) {
printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
ap->id, tmout / HZ);
return 1;
}
return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int dev0 = devmask & (1 << 0);
unsigned int dev1 = devmask & (1 << 1);
unsigned long timeout;
/* if device 0 was found in ata_dev_devchk, wait for its
* BSY bit to clear
*/
if (dev0)
ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* if device 1 was found in ata_dev_devchk, wait for
* register access, then wait for BSY to clear
*/
timeout = jiffies + ATA_TMOUT_BOOT;
while (dev1) {
u8 nsect, lbal;
__ata_dev_select(ap, 1);
if (ap->flags & ATA_FLAG_MMIO) {
nsect = readb((void *) ioaddr->nsect_addr);
lbal = readb((void *) ioaddr->lbal_addr);
} else {
nsect = inb(ioaddr->nsect_addr);
lbal = inb(ioaddr->lbal_addr);
}
if ((nsect == 1) && (lbal == 1))
break;
if (time_after(jiffies, timeout)) {
dev1 = 0;
break;
}
msleep(50); /* give drive a breather */
}
if (dev1)
ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* is all this really necessary? */
__ata_dev_select(ap, 0);
if (dev1)
__ata_dev_select(ap, 1);
if (dev0)
__ata_dev_select(ap, 0);
}
/**
* ata_bus_edd - reset the bus via EXECUTE DEVICE DIAGNOSTIC
* @ap: port to reset
*
* Issues EXECUTE DEVICE DIAGNOSTIC with interrupts disabled and then
* waits for BSY to clear.
*
* LOCKING:
*
*/
static unsigned int ata_bus_edd(struct ata_port *ap)
{
struct ata_taskfile tf;
/* set up execute-device-diag (bus reset) taskfile */
/* also, take interrupts to a known state (disabled) */
DPRINTK("execute-device-diag\n");
ata_tf_init(ap, &tf, 0);
tf.ctl |= ATA_NIEN;
tf.command = ATA_CMD_EDD;
tf.protocol = ATA_PROT_NODATA;
/* do bus reset */
ata_tf_to_host(ap, &tf);
/* spec says at least 2ms. but who knows with those
* crazy ATAPI devices...
*/
msleep(150);
return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
static unsigned int ata_bus_softreset(struct ata_port *ap,
unsigned int devmask)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
DPRINTK("ata%u: bus reset via SRST\n", ap->id);
/* software reset. causes dev0 to be selected */
if (ap->flags & ATA_FLAG_MMIO) {
writeb(ap->ctl, ioaddr->ctl_addr);
udelay(10); /* FIXME: flush */
writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
udelay(10); /* FIXME: flush */
writeb(ap->ctl, ioaddr->ctl_addr);
} else {
outb(ap->ctl, ioaddr->ctl_addr);
udelay(10);
outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
udelay(10);
outb(ap->ctl, ioaddr->ctl_addr);
}
/* spec mandates ">= 2ms" before checking status.
* We wait 150ms, because that was the magic delay used for
* ATAPI devices in Hale Landis's ATADRVR, for the period of time
* between when the ATA command register is written, and then
* status is checked. Because waiting for "a while" before
* checking status is fine, post SRST, we perform this magic
* delay here as well.
*/
msleep(150);
ata_bus_post_reset(ap, devmask);
return 0;
}
/**
* ata_bus_reset - reset host port and associated ATA channel
* @ap: port to reset
*
* This is typically the first time we actually start issuing
* commands to the ATA channel. We wait for BSY to clear, then
* issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
* result. Determine what devices, if any, are on the channel
* by looking at the device 0/1 error register. Look at the signature
* stored in each device's taskfile registers, to determine if
* the device is ATA or ATAPI.
*
* LOCKING:
* Inherited from caller. Some functions called by this function
* obtain the host_set lock.
*
* SIDE EFFECTS:
* Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
*/
void ata_bus_reset(struct ata_port *ap)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
u8 err;
unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
/* set up device control */
if (ap->flags & ATA_FLAG_MMIO)
writeb(ap->ctl, ioaddr->ctl_addr);
else
outb(ap->ctl, ioaddr->ctl_addr);
/* determine if device 0/1 are present */
dev0 = ata_dev_devchk(ap, 0);
if (slave_possible)
dev1 = ata_dev_devchk(ap, 1);
if (dev0)
devmask |= (1 << 0);
if (dev1)
devmask |= (1 << 1);
/* select device 0 again */
__ata_dev_select(ap, 0);
/* issue bus reset */
if (ap->flags & ATA_FLAG_SRST)
rc = ata_bus_softreset(ap, devmask);
else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0)
rc = ata_bus_edd(ap);
if (rc)
goto err_out;
/*
* determine by signature whether we have ATA or ATAPI devices
*/
err = ata_dev_try_classify(ap, 0, dev0);
if ((slave_possible) && (err != 0x81))
ata_dev_try_classify(ap, 1, dev1);
/* re-enable interrupts */
ata_irq_on(ap);
/* is double-select really necessary? */
if (ap->device[1].class != ATA_DEV_NONE)
__ata_dev_select(ap, 1);
if (ap->device[0].class != ATA_DEV_NONE)
__ata_dev_select(ap, 0);
/* if no devices were detected, disable this port */
if ((ap->device[0].class == ATA_DEV_NONE) &&
(ap->device[1].class == ATA_DEV_NONE))
goto err_out;
DPRINTK("EXIT\n");
return;
err_out:
printk(KERN_ERR "ata%u: disabling port\n", ap->id);
ap->ops->port_disable(ap);
DPRINTK("EXIT\n");
}
/**
* ata_host_set_pio - program the best common PIO mode (3 or 4)
* @ap: port whose devices are configured
*
* Intersects host and device PIO capabilities and programs the
* selected mode; disables the port if neither PIO 3 nor PIO 4 is
* supported by all parties.
*
* LOCKING:
*/
static void ata_host_set_pio(struct ata_port *ap)
{
struct ata_device *master, *slave;
unsigned int pio, i;
u16 mask;
master = &ap->device[0];
slave = &ap->device[1];
assert (ata_dev_present(master) || ata_dev_present(slave));
mask = ap->pio_mask;
if (ata_dev_present(master))
mask &= (master->id[ATA_ID_PIO_MODES] & 0x03);
if (ata_dev_present(slave))
mask &= (slave->id[ATA_ID_PIO_MODES] & 0x03);
/* require pio mode 3 or 4 support for host and all devices */
if (mask == 0) {
printk(KERN_WARNING "ata%u: no PIO3/4 support, ignoring\n",
ap->id);
goto err_out;
}
pio = (mask & ATA_ID_PIO4) ? 4 : 3;
for (i = 0; i < ATA_MAX_DEVICES; i++)
if (ata_dev_present(&ap->device[i])) {
ap->device[i].pio_mode = (pio == 3) ?
XFER_PIO_3 : XFER_PIO_4;
ap->ops->set_piomode(ap, &ap->device[i], pio);
}
return;
err_out:
ap->ops->port_disable(ap);
}
/**
* ata_host_set_udma - program the highest common UDMA mode
* @ap: port whose devices are configured
*
* Intersects host and device UDMA capabilities and programs the
* highest common mode; disables the port if there is none.
*
* LOCKING:
*/
static void ata_host_set_udma(struct ata_port *ap)
{
struct ata_device *master, *slave;
u16 mask;
unsigned int i, j;
int udma_mode = -1;
master = &ap->device[0];
slave = &ap->device[1];
assert (ata_dev_present(master) || ata_dev_present(slave));
assert ((ap->flags & ATA_FLAG_PORT_DISABLED) == 0);
DPRINTK("udma masks: host 0x%X, master 0x%X, slave 0x%X\n",
ap->udma_mask,
(!ata_dev_present(master)) ? 0xff :
(master->id[ATA_ID_UDMA_MODES] & 0xff),
(!ata_dev_present(slave)) ? 0xff :
(slave->id[ATA_ID_UDMA_MODES] & 0xff));
mask = ap->udma_mask;
if (ata_dev_present(master))
mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
if (ata_dev_present(slave))
mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
i = XFER_UDMA_7;
while (i >= XFER_UDMA_0) {
j = i - XFER_UDMA_0;
DPRINTK("mask 0x%X i 0x%X j %u\n", mask, i, j);
if (mask & (1 << j)) {
udma_mode = i;
break;
}
i--;
}
/* require udma for host and all attached devices */
if (udma_mode < 0) {
printk(KERN_WARNING "ata%u: no UltraDMA support, ignoring\n",
ap->id);
goto err_out;
}
for (i = 0; i < ATA_MAX_DEVICES; i++)
if (ata_dev_present(&ap->device[i])) {
ap->device[i].udma_mode = udma_mode;
ap->ops->set_udmamode(ap, &ap->device[i], udma_mode);
}
return;
err_out:
ap->ops->port_disable(ap);
}
/**
* ata_dev_set_xfermode - issue SET FEATURES - XFER MODE to a device
* @ap: port containing the device
* @dev: device whose transfer mode is being set
*
* LOCKING:
*/
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
struct ata_taskfile tf;
/* set up set-features taskfile */
DPRINTK("set features - xfer mode\n");
ata_tf_init(ap, &tf, dev->devno);
tf.ctl |= ATA_NIEN;
tf.command = ATA_CMD_SET_FEATURES;
tf.feature = SETFEATURES_XFER;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
if (dev->flags & ATA_DFLAG_PIO)
tf.nsect = dev->pio_mode;
else
tf.nsect = dev->udma_mode;
/* do bus reset */
ata_tf_to_host(ap, &tf);
/* crazy ATAPI devices... */
if (dev->class == ATA_DEV_ATAPI)
msleep(150);
ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
ata_irq_on(ap); /* re-enable interrupts */
ata_wait_idle(ap);
DPRINTK("EXIT\n");
}
/**
* ata_dev_set_udma - program a device's selected UDMA mode
* @ap: port containing the device
* @device: device (0 or 1) to configure
*
* LOCKING:
*/
static void ata_dev_set_udma(struct ata_port *ap, unsigned int device)
{
struct ata_device *dev = &ap->device[device];
if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
return;
ata_dev_set_xfermode(ap, dev);
assert((dev->udma_mode >= XFER_UDMA_0) &&
(dev->udma_mode <= XFER_UDMA_7));
printk(KERN_INFO "ata%u: dev %u configured for %s\n",
ap->id, device,
udma_str[dev->udma_mode - XFER_UDMA_0]);
}
/**
* ata_dev_set_pio - force a device into PIO transfer mode
* @ap: port the device is attached to
* @device: device index on the port (0 or 1)
*
* LOCKING:
*/
static void ata_dev_set_pio(struct ata_port *ap, unsigned int device)
{
struct ata_device *dev = &ap->device[device];
if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
return;
/* force PIO mode */
dev->flags |= ATA_DFLAG_PIO;
ata_dev_set_xfermode(ap, dev);
assert((dev->pio_mode >= XFER_PIO_3) &&
(dev->pio_mode <= XFER_PIO_4));
printk(KERN_INFO "ata%u: dev %u configured for PIO%c\n",
ap->id, device,
dev->pio_mode == 3 ? '3' : '4');
}
/**
* ata_sg_clean - unmap the DMA buffers of a completed command
* @qc: command whose buffers are to be unmapped
*
* LOCKING:
*/
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
Scsi_Cmnd *cmd = qc->scsicmd;
struct scatterlist *sg = qc->sg;
int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
assert(dir == SCSI_DATA_READ || dir == SCSI_DATA_WRITE);
assert(qc->flags & ATA_QCFLAG_SG);
assert(sg != NULL);
if (!cmd->use_sg)
assert(qc->n_elem == 1);
DPRINTK("unmapping %u sg elements\n", qc->n_elem);
if (cmd->use_sg)
pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
else
pci_unmap_single(ap->host_set->pdev, sg[0].dma_address,
sg[0].length, dir);
qc->flags &= ~ATA_QCFLAG_SG;
qc->sg = NULL;
}
/**
* ata_fill_sg - fill the port's PRD table from a command's scatter-gather list
* @qc: command whose scatter-gather list describes the data buffers
*
* LOCKING:
*
*/
void ata_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
unsigned int i;
assert(sg != NULL);
assert(qc->n_elem > 0);
for (i = 0; i < qc->n_elem; i++) {
ap->prd[i].addr = cpu_to_le32(sg[i].dma_address);
ap->prd[i].flags_len = cpu_to_le32(sg[i].length);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n",
i, le32_to_cpu(ap->prd[i].addr), le32_to_cpu(ap->prd[i].flags_len));
}
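/* flag the final entry so the controller knows where the PRD table ends */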
ap->prd[qc->n_elem - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/**
* ata_sg_setup_one - DMA-map the single data buffer of a command
* @qc: command whose buffer is to be mapped
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
*
*/
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
Scsi_Cmnd *cmd = qc->scsicmd;
int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
struct scatterlist *sg = qc->sg;
unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG);
assert(sg == &qc->sgent);
assert(qc->n_elem == 1);
sg->page = virt_to_page(cmd->request_buffer);
sg->offset = (unsigned long) cmd->request_buffer & ~PAGE_MASK;
sg->length = cmd->request_bufflen;
if (!have_sg)
return 0;
sg->dma_address = pci_map_single(ap->host_set->pdev,
cmd->request_buffer,
cmd->request_bufflen, dir);
DPRINTK("mapped buffer of %d bytes for %s\n", cmd->request_bufflen,
qc->flags & ATA_QCFLAG_WRITE ? "write" : "read");
return 0;
}
/**
* ata_sg_setup - DMA-map the scatter-gather list of a command
* @qc: command whose scatter-gather list is to be mapped
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
*
*/
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
Scsi_Cmnd *cmd = qc->scsicmd;
struct scatterlist *sg;
int n_elem;
unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG);
VPRINTK("ENTER, ata%u, use_sg %d\n", ap->id, cmd->use_sg);
assert(cmd->use_sg > 0);
sg = (struct scatterlist *)cmd->request_buffer;
if (have_sg) {
int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
n_elem = pci_map_sg(ap->host_set->pdev, sg, cmd->use_sg, dir);
if (n_elem < 1)
return -1;
DPRINTK("%d sg elements mapped\n", n_elem);
} else {
n_elem = cmd->use_sg;
}
qc->n_elem = n_elem;
return 0;
}
/**
* ata_pio_poll - poll a port that is busy during a PIO transfer
* @ap: port being polled
*
* LOCKING:
*
* RETURNS:
*
*/
static unsigned long ata_pio_poll(struct ata_port *ap)
{
u8 status;
unsigned int poll_state = THR_UNKNOWN;
unsigned int reg_state = THR_UNKNOWN;
const unsigned int tmout_state = THR_PIO_TMOUT;
switch (ap->thr_state) {
case THR_PIO:
case THR_PIO_POLL:
poll_state = THR_PIO_POLL;
reg_state = THR_PIO;
break;
case THR_PIO_LAST:
case THR_PIO_LAST_POLL:
poll_state = THR_PIO_LAST_POLL;
reg_state = THR_PIO_LAST;
break;
default:
BUG();
break;
}
status = ata_chk_status(ap);
if (status & ATA_BUSY) {
if (time_after(jiffies, ap->thr_timeout)) {
ap->thr_state = tmout_state;
return 0;
}
ap->thr_state = poll_state;
return ATA_SHORT_PAUSE;
}
ap->thr_state = reg_state;
return 0;
}
/**
* ata_pio_start - load the taskfile and kick off a polled PIO transfer
* @qc: command to be started
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_pio_start (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
assert((qc->tf.protocol == ATA_PROT_PIO_READ) ||
(qc->tf.protocol == ATA_PROT_PIO_WRITE));
qc->flags |= ATA_QCFLAG_POLL;
qc->tf.ctl |= ATA_NIEN; /* disable interrupts */
ata_tf_to_host_nolock(ap, &qc->tf);
ata_thread_wake(ap, THR_PIO);
}
/**
* ata_pio_complete - finish a PIO transfer once the data phase is done
* @ap: port on which the transfer is completing
*
* LOCKING:
*/
static void ata_pio_complete (struct ata_port *ap)
{
struct ata_queued_cmd *qc;
unsigned long flags;
u8 drv_stat;
/*
* This is purely heuristic. This is a fast path.
* Sometimes when we enter, BSY will be cleared in
* a chk-status or two. If not, the drive is probably seeking
* or something. Snooze for a couple msecs, then
* chk-status again. If still busy, fall back to
* THR_PIO_POLL state.
*/
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
msleep(2);
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
ap->thr_state = THR_PIO_LAST_POLL;
ap->thr_timeout = jiffies + ATA_TMOUT_PIO;
return;
}
}
drv_stat = ata_wait_idle(ap);
if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
ap->thr_state = THR_PIO_ERR;
return;
}
qc = ata_qc_from_tag(ap, ap->active_tag);
assert(qc != NULL);
spin_lock_irqsave(&ap->host_set->lock, flags);
ap->thr_state = THR_IDLE;
spin_unlock_irqrestore(&ap->host_set->lock, flags);
ata_irq_on(ap);
ata_qc_complete(qc, drv_stat, 0);
}
/**
* ata_pio_sector - transfer one sector between device and memory via PIO
* @ap: port on which the transfer is running
*
* LOCKING:
*/
static void ata_pio_sector(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
struct scatterlist *sg;
Scsi_Cmnd *cmd;
unsigned char *buf;
struct page *page;
u8 status;
/*
* This is purely heuristic. This is a fast path.
* Sometimes when we enter, BSY will be cleared in
* a chk-status or two. If not, the drive is probably seeking
* or something. Snooze for a couple msecs, then
* chk-status again. If still busy, fall back to
* THR_PIO_POLL state.
*/
status = ata_busy_wait(ap, ATA_BUSY, 5);
if (status & ATA_BUSY) {
msleep(2);
status = ata_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
ap->thr_state = THR_PIO_POLL;
ap->thr_timeout = jiffies + ATA_TMOUT_PIO;
return;
}
}
/* handle BSY=0, DRQ=0 as error */
if ((status & ATA_DRQ) == 0) {
ap->thr_state = THR_PIO_ERR;
return;
}
qc = ata_qc_from_tag(ap, ap->active_tag);
assert(qc != NULL);
cmd = qc->scsicmd;
sg = qc->sg;
if (qc->cursect == (qc->nsect - 1))
ap->thr_state = THR_PIO_LAST;
/* remember which page we map; qc->cursg may advance before the kunmap below */
page = sg[qc->cursg].page;
buf = kmap(page) + sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE);
qc->cursect++;
qc->cursg_ofs++;
if (cmd->use_sg)
if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
qc->cursg++;
qc->cursg_ofs = 0;
}
DPRINTK("data %s, drv_stat 0x%X\n",
qc->flags & ATA_QCFLAG_WRITE ? "write" : "read",
status);
/* do the actual data transfer */
/* FIXME: mmio-ize */
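/* one sector = 512 bytes, moved through the data register as 32-bit words */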
if (qc->flags & ATA_QCFLAG_WRITE)
outsl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS);
else
insl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS);
kunmap(page);
}
/**
* ata_eng_schedule - run an iteration of the pio/dma/whatever engine
* @ap: port on which activity will occur
* @eng: instance of engine
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_eng_schedule (struct ata_port *ap, struct ata_engine *eng)
{
/* FIXME */
}
/**
* ata_eng_timeout - Handle timeout of queued command
* @ap: Port on which timed-out command is active
*
* Some part of the kernel (currently, only the SCSI layer)
* has noticed that the active command on port @ap has not
* completed after a specified length of time. Handle this
* condition by disabling DMA (if necessary) and completing
* transactions, with error if necessary.
*
* This also handles the case of the "lost interrupt", where
* for some reason (possibly hardware bug, possibly driver bug)
* an interrupt was not delivered to the driver, even though the
* transaction completed successfully.
*
* LOCKING:
* Inherited from SCSI layer (none, can sleep)
*/
void ata_eng_timeout(struct ata_port *ap)
{
u8 host_stat, drv_stat;
struct ata_queued_cmd *qc;
DPRINTK("ENTER\n");
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
ap->id);
goto out;
}
switch (qc->tf.protocol) {
case ATA_PROT_DMA_READ:
case ATA_PROT_DMA_WRITE:
if (ap->flags & ATA_FLAG_MMIO) {
void *mmio = (void *) ap->ioaddr.bmdma_addr;
host_stat = readb(mmio + ATA_DMA_STATUS);
} else
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
printk(KERN_ERR "ata%u: DMA timeout, stat 0x%x\n",
ap->id, host_stat);
ata_dma_complete(ap, host_stat, 1);
break;
case ATA_PROT_NODATA:
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n",
ap->id, qc->tf.command, drv_stat);
ata_qc_complete(qc, drv_stat, 1);
break;
default:
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
ap->id, qc->tf.command, drv_stat);
ata_qc_complete(qc, drv_stat, 1);
break;
}
out:
DPRINTK("EXIT\n");
}
/**
* ata_qc_new - allocate a free command tag on a port
* @ap: port on which a queued command slot is requested
*
* LOCKING:
*/
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
unsigned int i;
for (i = 0; i < ATA_MAX_QUEUE; i++)
if (!test_and_set_bit(i, &ap->qactive)) {
qc = ata_qc_from_tag(ap, i);
break;
}
if (qc)
qc->tag = i;
return qc;
}
/**
* ata_qc_new_init - allocate and initialize a queued command
* @ap: port on which the command will run
* @dev: device the command is addressed to
*
* LOCKING:
*/
struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
struct ata_device *dev)
{
struct ata_queued_cmd *qc;
qc = ata_qc_new(ap);
if (qc) {
qc->sg = NULL;
qc->flags = 0;
qc->scsicmd = NULL;
qc->ap = ap;
qc->dev = dev;
INIT_LIST_HEAD(&qc->node);
init_MUTEX_LOCKED(&qc->sem);
ata_tf_init(ap, &qc->tf, dev->devno);
if (likely((dev->flags & ATA_DFLAG_PIO) == 0))
qc->flags |= ATA_QCFLAG_DMA;
if (dev->flags & ATA_DFLAG_LBA48)
qc->tf.flags |= ATA_TFLAG_LBA48;
}
return qc;
}
/**
* ata_qc_complete - finish a queued command and return its tag to the free pool
* @qc: command that has completed
* @drv_stat: final ATA Status register value
* @done_late: nonzero if completion was triggered by timeout handling
*
* LOCKING:
*
*/
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late)
{
struct ata_port *ap = qc->ap;
Scsi_Cmnd *cmd = qc->scsicmd;
unsigned int tag, do_clear = 0;
assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
assert(qc->flags & ATA_QCFLAG_ACTIVE);
if (likely(qc->flags & ATA_QCFLAG_SG))
ata_sg_clean(qc);
if (cmd) {
if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) {
if (qc->flags & ATA_QCFLAG_ATAPI)
cmd->result = SAM_STAT_CHECK_CONDITION;
else
ata_to_sense_error(qc);
} else {
cmd->result = SAM_STAT_GOOD;
}
qc->scsidone(cmd);
}
qc->flags &= ~ATA_QCFLAG_ACTIVE;
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
if (tag == ap->active_tag)
ap->active_tag = ATA_TAG_POISON;
qc->tag = ATA_TAG_POISON;
do_clear = 1;
}
up(&qc->sem);
if (likely(do_clear))
clear_bit(tag, &ap->qactive);
}
/**
* ata_qc_push - add a queued command to the port's engine queue
* @qc: command to queue
* @append: nonzero to add at the tail, zero to add at the head
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append)
{
struct ata_port *ap = qc->ap;
struct ata_engine *eng = &ap->eng;
if (likely(append))
list_add_tail(&qc->node, &eng->q);
else
list_add(&qc->node, &eng->q);
if (!test_and_set_bit(ATA_EFLG_ACTIVE, &eng->flags))
ata_eng_schedule(ap, eng);
}
/**
* ata_qc_issue - map buffers and hand a queued command to the hardware
* @qc: command to issue
*
* LOCKING:
*
* RETURNS:
*
*/
int ata_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
Scsi_Cmnd *cmd = qc->scsicmd;
unsigned int dma = qc->flags & ATA_QCFLAG_DMA;
ata_dev_select(ap, qc->dev->devno, 1, 0);
/* set up SG table */
if (cmd->use_sg) {
if (ata_sg_setup(qc))
goto err_out;
} else {
if (ata_sg_setup_one(qc))
goto err_out;
}
ap->ops->fill_sg(qc);
qc->ap->active_tag = qc->tag;
qc->flags |= ATA_QCFLAG_ACTIVE;
if (likely(dma)) {
ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_start(qc); /* initiate bmdma */
} else
/* load tf registers, initiate polling pio */
ata_pio_start(qc);
return 0;
err_out:
return -1;
}
/**
* ata_bmdma_start_mmio - program the BMDMA engine and start a transfer (MMIO)
* @qc: command whose DMA transfer is to be started
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE);
u8 host_stat, dmactl;
void *mmio = (void *) ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
/* specify data direction */
/* FIXME: redundant to later start-dma command? */
writeb(rw ? 0 : ATA_DMA_WR, mmio + ATA_DMA_CMD);
/* clear interrupt, error bits */
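/* these status bits are write-one-to-clear, so write back what we read */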
host_stat = readb(mmio + ATA_DMA_STATUS);
writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, mmio + ATA_DMA_STATUS);
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
/* start host DMA transaction */
dmactl = readb(mmio + ATA_DMA_CMD);
writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
/* Strictly, one may wish to issue a readb() here, to
* flush the mmio write. However, control also passes
* to the hardware at this point, and it will interrupt
* us when we are to resume control. So, in effect,
* we don't care when the mmio write flushes.
* Further, a read of the DMA status register _immediately_
* following the write may not be what certain flaky hardware
* expects, so I think it is best not to add a readb()
* without first testing all the MMIO ATA cards/mobos.
* Or maybe I'm just being paranoid.
*/
}
/**
* ata_bmdma_start_pio - program the BMDMA engine and start a transfer (port I/O)
* @qc: command whose DMA transfer is to be started
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE);
u8 host_stat, dmactl;
/* load PRD table addr. */
outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction */
/* FIXME: redundant to later start-dma command? */
outb(rw ? 0 : ATA_DMA_WR, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* clear interrupt, error bits */
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
/* start host DMA transaction */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
outb(dmactl | ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/**
* ata_dma_complete - stop the BMDMA engine and complete the active command
* @ap: port on which the DMA transfer ran
* @host_stat: BMDMA status register value sampled by the caller
* @done_late: nonzero if completion was triggered by timeout handling
*
* LOCKING:
*/
static void ata_dma_complete(struct ata_port *ap, u8 host_stat,
unsigned int done_late)
{
VPRINTK("ENTER\n");
if (ap->flags & ATA_FLAG_MMIO) {
void *mmio = (void *) ap->ioaddr.bmdma_addr;
/* clear start/stop bit */
writeb(0, mmio + ATA_DMA_CMD);
/* ack intr, err bits */
writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
mmio + ATA_DMA_STATUS);
} else {
/* clear start/stop bit */
outb(0, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* ack intr, err bits */
outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_altstatus(ap); /* dummy read */
DPRINTK("host %u, host_stat==0x%X, drv_stat==0x%X\n",
ap->id, (u32) host_stat, (u32) ata_chk_status(ap));
/* get drive status; clear intr; complete txn */
ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
ata_wait_idle(ap), done_late);
}
/**
* ata_host_intr - Handle host interrupt for given (port, task)
* @ap: Port on which interrupt arrived (possibly...)
* @qc: Taskfile currently active in engine
*
* Handle host interrupt for given queued command. Currently,
* only DMA interrupts are handled. All other commands are
* handled via polling with interrupts disabled (nIEN bit).
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
* One if interrupt was handled, zero if not (shared irq).
*/
static inline unsigned int ata_host_intr (struct ata_port *ap,
struct ata_queued_cmd *qc)
{
u8 status, host_stat;
unsigned int handled = 0;
switch (qc->tf.protocol) {
case ATA_PROT_DMA_READ:
case ATA_PROT_DMA_WRITE:
if (ap->flags & ATA_FLAG_MMIO) {
void *mmio = (void *) ap->ioaddr.bmdma_addr;
host_stat = readb(mmio + ATA_DMA_STATUS);
} else
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat);
if (!(host_stat & ATA_DMA_INTR)) {
ap->stats.idle_irq++;
break;
}
ata_dma_complete(ap, host_stat, 0);
handled = 1;
break;
case ATA_PROT_NODATA: /* command completion, but no data xfer */
status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
ata_qc_complete(qc, status, 0);
handled = 1;
break;
default:
ap->stats.idle_irq++;
#ifdef ATA_IRQ_TRAP
if ((ap->stats.idle_irq % 1000) == 0) {
handled = 1;
ata_irq_ack(ap, 0); /* debug trap */
printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
}
#endif
break;
}
return handled;
}
/**
* ata_interrupt - interrupt handler shared by all ports of a host set
* @irq: irq line that fired
* @dev_instance: pointer to our struct ata_host_set
* @regs: processor registers at interrupt time (unused)
*
* LOCKING:
*
* RETURNS:
*
*/
irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
spin_lock_irqsave(&host_set->lock, flags);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap;
ap = host_set->ports[i];
if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0))
handled += ata_host_intr(ap, qc);
}
}
spin_unlock_irqrestore(&host_set->lock, flags);
return IRQ_RETVAL(handled);
}
/**
* ata_thread_wake - move the port thread into a new state and wake it
* @ap: port whose thread is to be woken
* @thr_state: state the thread should run next
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_thread_wake(struct ata_port *ap, unsigned int thr_state)
{
assert(ap->thr_state == THR_IDLE);
ap->thr_state = thr_state;
up(&ap->thr_sem);
}
/**
* ata_thread_timer - timer callback that wakes the port thread
* @opaque: pointer to the owning struct ata_port
*
* LOCKING:
*/
static void ata_thread_timer(unsigned long opaque)
{
struct ata_port *ap = (struct ata_port *) opaque;
up(&ap->thr_sem);
}
/**
* ata_thread_iter - run one step of the port thread's state machine
* @ap: port whose state machine is advanced
*
* LOCKING:
*
* RETURNS:
*
*/
static unsigned long ata_thread_iter(struct ata_port *ap)
{
long timeout = 0;
DPRINTK("ata%u: thr_state %s\n",
ap->id, ata_thr_state_name(ap->thr_state));
switch (ap->thr_state) {
case THR_UNKNOWN:
ap->thr_state = THR_PORT_RESET;
break;
case THR_PROBE_START:
down(&ap->sem);
ap->thr_state = THR_PORT_RESET;
break;
case THR_PORT_RESET:
ata_port_reset(ap);
break;
case THR_PROBE_SUCCESS:
up(&ap->probe_sem);
ap->thr_state = THR_IDLE;
break;
case THR_PROBE_FAILED:
up(&ap->probe_sem);
ap->thr_state = THR_AWAIT_DEATH;
break;
case THR_AWAIT_DEATH:
timeout = -1;
break;
case THR_IDLE:
timeout = 30 * HZ;
break;
case THR_PIO:
ata_pio_sector(ap);
break;
case THR_PIO_LAST:
ata_pio_complete(ap);
break;
case THR_PIO_POLL:
case THR_PIO_LAST_POLL:
timeout = ata_pio_poll(ap);
break;
case THR_PIO_TMOUT:
printk(KERN_ERR "ata%d: FIXME: THR_PIO_TMOUT\n", /* FIXME */
ap->id);
timeout = 11 * HZ;
break;
case THR_PIO_ERR:
printk(KERN_ERR "ata%d: FIXME: THR_PIO_ERR\n", /* FIXME */
ap->id);
timeout = 11 * HZ;
break;
case THR_PACKET:
atapi_cdb_send(ap);
break;
default:
printk(KERN_DEBUG "ata%u: unknown thr state %s\n",
ap->id, ata_thr_state_name(ap->thr_state));
break;
}
DPRINTK("ata%u: new thr_state %s, returning %ld\n",
ap->id, ata_thr_state_name(ap->thr_state), timeout);
return timeout;
}
/**
* ata_thread - per-port kernel thread main loop
* @data: pointer to the owning struct ata_port
*
* LOCKING:
*
* RETURNS:
*
*/
static int ata_thread (void *data)
{
struct ata_port *ap = data;
long timeout;
daemonize ("katad-%u", ap->id);
allow_signal(SIGTERM);
while (1) {
cond_resched();
timeout = ata_thread_iter(ap);
if (signal_pending (current))
flush_signals(current);
if ((timeout < 0) || (ap->time_to_die))
break;
/* note sleeping for full timeout not guaranteed (that's ok) */
if (timeout) {
mod_timer(&ap->thr_timer, jiffies + timeout);
down_interruptible(&ap->thr_sem);
if (signal_pending (current))
flush_signals(current);
if (ap->time_to_die)
break;
}
}
printk(KERN_DEBUG "ata%u: thread exiting\n", ap->id);
ap->thr_pid = -1;
complete_and_exit (&ap->thr_exited, 0);
}
/**
* ata_thread_kill - kill per-port kernel thread
* @ap: port whose thread is to be killed
*
* LOCKING:
*
*/
static int ata_thread_kill(struct ata_port *ap)
{
int ret = 0;
if (ap->thr_pid >= 0) {
ap->time_to_die = 1;
wmb();
ret = kill_proc(ap->thr_pid, SIGTERM, 1);
if (ret)
printk(KERN_ERR "ata%d: unable to kill kernel thread\n",
ap->id);
else
wait_for_completion(&ap->thr_exited);
}
return ret;
}
/**
* atapi_cdb_send - Write CDB bytes to hardware
* @ap: Port to which ATAPI device is attached.
*
* When the device has indicated its readiness to accept
* a CDB, this function is called. Send the CDB.
* If DMA is to be performed, exit immediately.
* Otherwise, we are in polling mode, so poll
* status until the operation succeeds or fails.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
static void atapi_cdb_send(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
u8 status;
qc = ata_qc_from_tag(ap, ap->active_tag);
assert(qc != NULL);
assert(qc->flags & ATA_QCFLAG_ACTIVE);
/* sleep-wait for BSY to clear */
DPRINTK("busy wait\n");
if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
goto err_out;
/* make sure DRQ is set */
status = ata_chk_status(ap);
if ((status & ATA_DRQ) == 0)
goto err_out;
/* send SCSI cdb */
/* FIXME: mmio-ize */
DPRINTK("send cdb\n");
outsl(ap->ioaddr.data_addr,
qc->scsicmd->cmnd, ap->host->max_cmd_len / 4);
/* if we are DMA'ing, irq handler takes over from here */
if (qc->tf.feature == ATAPI_PKT_DMA)
goto out;
/* sleep-wait for BSY to clear */
DPRINTK("busy wait 2\n");
if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
goto err_out;
/* wait for BSY,DRQ to clear */
status = ata_wait_idle(ap);
if (status & (ATA_BUSY | ATA_DRQ))
goto err_out;
/* transaction completed, indicate such to scsi stack */
ata_qc_complete(qc, status, 0);
ata_irq_on(ap);
out:
ap->thr_state = THR_IDLE;
return;
err_out:
ata_qc_complete(qc, ATA_ERR, 0);
goto out;
}
/**
* ata_host_remove - tear down a port's SCSI host, kernel thread and DMA memory
* @ap: port to be torn down
* @do_unregister: nonzero to also unregister the SCSI host
*
* LOCKING:
*/
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
struct Scsi_Host *sh = ap->host;
DPRINTK("ENTER\n");
if (do_unregister)
scsi_remove_host(sh); /* FIXME: check return val */
ata_thread_kill(ap); /* FIXME: check return val */
pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
}
/**
* ata_host_init - initialize an ata_port structure and its SCSI host
* @ap: port structure to initialize
* @host: SCSI host this port is attached to
* @host_set: host set the port belongs to
* @ent: probe entry describing the controller
* @port_no: port number on the controller (0 or 1)
*
* LOCKING:
*
*/
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
struct ata_host_set *host_set,
struct ata_probe_ent *ent, unsigned int port_no)
{
unsigned int i;
host->max_id = 16;
host->max_lun = 1;
host->max_channel = 1;
host->unique_id = ata_unique_id++;
host->max_cmd_len = 12;
scsi_set_device(host, &ent->pdev->dev);
ap->flags = ATA_FLAG_PORT_DISABLED;
ap->id = host->unique_id;
ap->host = host;
ap->ctl = ATA_DEVCTL_OBS;
ap->host_set = host_set;
ap->port_no = port_no;
ap->pio_mask = ent->pio_mask;
ap->udma_mask = ent->udma_mask;
ap->flags |= ent->host_flags;
ap->ops = ent->port_ops;
ap->thr_state = THR_PROBE_START;
ap->cbl = ATA_CBL_NONE;
ap->device[0].flags = ATA_DFLAG_MASTER;
ap->active_tag = ATA_TAG_POISON;
/* ata_engine init */
ap->eng.flags = 0;
INIT_LIST_HEAD(&ap->eng.q);
for (i = 0; i < ATA_MAX_DEVICES; i++)
ap->device[i].devno = i;
init_completion(&ap->thr_exited);
init_MUTEX_LOCKED(&ap->probe_sem);
init_MUTEX_LOCKED(&ap->sem);
init_MUTEX_LOCKED(&ap->thr_sem);
init_timer(&ap->thr_timer);
ap->thr_timer.function = ata_thread_timer;
ap->thr_timer.data = (unsigned long) ap;
#ifdef ATA_IRQ_TRAP
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
* ata_host_add - allocate and set up one port, including its kernel thread
* @ent: probe entry describing the controller
* @host_set: host set the new port will belong to
* @port_no: port number on the controller
*
* LOCKING:
*
* RETURNS:
*
*/
static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
struct ata_host_set *host_set,
unsigned int port_no)
{
struct pci_dev *pdev = ent->pdev;
struct Scsi_Host *host;
struct ata_port *ap;
DPRINTK("ENTER\n");
host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
if (!host)
return NULL;
ap = (struct ata_port *) &host->hostdata[0];
ata_host_init(ap, host, host_set, ent, port_no);
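/* allocate the PRD table the bus-master DMA engine will walk */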
ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma);
if (!ap->prd)
goto err_out;
DPRINTK("prd alloc, virt %p, dma %x\n", ap->prd, ap->prd_dma);
ap->thr_pid = kernel_thread(ata_thread, ap, CLONE_FS | CLONE_FILES);
if (ap->thr_pid < 0) {
printk(KERN_ERR "ata%d: unable to start kernel thread\n",
ap->id);
goto err_out_free;
}
return ap;
err_out_free:
pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
err_out:
scsi_host_put(host);
return NULL;
}
/**
* ata_device_add - register all ports described by a probe entry
* @ent: probe entry describing the controller and its ports
*
* LOCKING:
*
* RETURNS:
*
*/
int ata_device_add(struct ata_probe_ent *ent)
{
unsigned int count = 0, i;
struct pci_dev *pdev = ent->pdev;
struct ata_host_set *host_set;
DPRINTK("ENTER\n");
/* alloc a container for our list of ATA ports (buses) */
host_set = kmalloc(sizeof(struct ata_host_set) +
(ent->n_ports * sizeof(void *)), GFP_KERNEL);
if (!host_set)
return 0;
memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
spin_lock_init(&host_set->lock);
host_set->pdev = pdev;
host_set->n_ports = ent->n_ports;
host_set->irq = ent->irq;
host_set->mmio_base = ent->mmio_base;
/* register each port bound to this device */
for (i = 0; i < ent->n_ports; i++) {
struct ata_port *ap;
ap = ata_host_add(ent, host_set, i);
if (!ap)
goto err_out;
host_set->ports[i] = ap;
/* print per-port info to dmesg */
printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
"bmdma 0x%lX irq %lu\n",
ap->id,
ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
ata_udma_string(ent->udma_mask),
ap->ioaddr.cmd_addr,
ap->ioaddr.ctl_addr,
ap->ioaddr.bmdma_addr,
ent->irq);
count++;
}
if (!count) {
kfree(host_set);
return 0;
}
/* obtain the irq, which is shared between channels */
if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
DRV_NAME, host_set))
goto err_out;
/* perform each probe synchronously */
DPRINTK("probe begin\n");
for (i = 0; i < count; i++) {
struct ata_port *ap;
int rc;
ap = host_set->ports[i];
DPRINTK("ata%u: probe begin\n", ap->id);
up(&ap->sem); /* start probe */
DPRINTK("ata%u: probe-wait begin\n", ap->id);
down(&ap->probe_sem); /* wait for end */
DPRINTK("ata%u: probe-wait end\n", ap->id);
rc = scsi_add_host(ap->host, &pdev->dev);
if (rc) {
printk(KERN_ERR "ata%u: scsi_add_host failed\n",
ap->id);
/* FIXME: do something useful here */
/* FIXME: handle unconditional calls to
* scsi_scan_host and ata_host_remove, below,
* at the very least
*/
}
}
/* probes are done, now scan each port's disk(s) */
DPRINTK("probe begin\n");
for (i = 0; i < count; i++) {
struct ata_port *ap = host_set->ports[i];
scsi_scan_host(ap->host);
}
pci_set_drvdata(pdev, host_set);
VPRINTK("EXIT, returning %u\n", ent->n_ports);
return ent->n_ports; /* success */
err_out:
for (i = 0; i < count; i++) {
ata_host_remove(host_set->ports[i], 1);
scsi_host_put(host_set->ports[i]->host);
}
kfree(host_set);
VPRINTK("EXIT, returning 0\n");
return 0;
}
/**
* ata_scsi_release - SCSI layer callback hook for host unload
* @host: libata host to be unloaded
*
* Performs all duties necessary to shut down a libata port:
* Kill port kthread, disable port, and release resources.
*
* LOCKING:
* Inherited from SCSI layer.
*
* RETURNS:
* One.
*/
int ata_scsi_release(struct Scsi_Host *host)
{
struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
DPRINTK("ENTER\n");
ap->ops->port_disable(ap);
ata_host_remove(ap, 0);
DPRINTK("EXIT\n");
return 1;
}
/**
* ata_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: I/O address block to fill in; cmd_addr must already be set
*/
void ata_std_ports(struct ata_ioports *ioaddr)
{
ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
ioaddr->cmdstat_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
/**
* ata_pci_init_one - probe and register a PCI IDE/SATA controller in one step
* @pdev: PCI device to be probed
* @port_info: per-port information supplied by the low-level driver
* @n_ports: number of entries in @port_info
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
*
*/
int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports)
{
struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
struct ata_port_info *port0, *port1;
u8 tmp8, mask;
unsigned int legacy_mode = 0;
int rc;
DPRINTK("ENTER\n");
port0 = port_info[0];
if (n_ports > 1)
port1 = port_info[1];
else
port1 = port0;
if ((port0->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
/* TODO: support transitioning to native mode? */
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
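/* prog-if bit 0 = primary channel native, bit 2 = secondary channel native;
 * if either channel is still in compatibility mode, treat the device as legacy */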
mask = (1 << 2) | (1 << 0);
if ((tmp8 & mask) != mask)
legacy_mode = (1 << 3);
}
/* FIXME... */
if ((!legacy_mode) && (n_ports > 1)) {
printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
return -EINVAL;
}
rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
if (legacy_mode) {
if (!request_region(0x1f0, 8, "libata")) {
struct resource *conflict, res;
res.start = 0x1f0;
res.end = 0x1f0 + 8 - 1;
conflict = ____request_resource(&ioport_resource, &res);
if (!strcmp(conflict->name, "libata"))
legacy_mode |= (1 << 0);
else
printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
} else
legacy_mode |= (1 << 0);
if (!request_region(0x170, 8, "libata")) {
struct resource *conflict, res;
res.start = 0x170;
res.end = 0x170 + 8 - 1;
conflict = ____request_resource(&ioport_resource, &res);
if (!strcmp(conflict->name, "libata"))
legacy_mode |= (1 << 1);
else
printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
} else
legacy_mode |= (1 << 1);
}
/* we have legacy mode, but all ports are unavailable */
if (legacy_mode == (1 << 3)) {
rc = -EBUSY;
goto err_out_regions;
}
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
goto err_out_regions;
probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
if (!probe_ent) {
rc = -ENOMEM;
goto err_out_regions;
}
memset(probe_ent, 0, sizeof(*probe_ent));
probe_ent->pdev = pdev;
INIT_LIST_HEAD(&probe_ent->node);
if (legacy_mode) {
probe_ent2 = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
if (!probe_ent2) {
rc = -ENOMEM;
goto err_out_free_ent;
}
memset(probe_ent2, 0, sizeof(*probe_ent));
probe_ent2->pdev = pdev;
INIT_LIST_HEAD(&probe_ent2->node);
}
probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
probe_ent->sht = port0->sht;
probe_ent->host_flags = port0->host_flags;
probe_ent->pio_mask = port0->pio_mask;
probe_ent->udma_mask = port0->udma_mask;
probe_ent->port_ops = port0->port_ops;
if (legacy_mode) {
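/* compatibility mode: primary channel at fixed 0x1f0/0x3f6 with IRQ 14,
 * secondary at 0x170/0x376 with IRQ 15 */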
probe_ent->port[0].cmd_addr = 0x1f0;
probe_ent->port[0].ctl_addr = 0x3f6;
probe_ent->n_ports = 1;
probe_ent->irq = 14;
ata_std_ports(&probe_ent->port[0]);
probe_ent2->port[0].cmd_addr = 0x170;
probe_ent2->port[0].ctl_addr = 0x376;
probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
probe_ent2->n_ports = 1;
probe_ent2->irq = 15;
ata_std_ports(&probe_ent2->port[0]);
probe_ent2->sht = port1->sht;
probe_ent2->host_flags = port1->host_flags;
probe_ent2->pio_mask = port1->pio_mask;
probe_ent2->udma_mask = port1->udma_mask;
probe_ent2->port_ops = port1->port_ops;
} else {
probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
ata_std_ports(&probe_ent->port[0]);
probe_ent->port[0].ctl_addr =
pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
ata_std_ports(&probe_ent->port[1]);
probe_ent->port[1].ctl_addr =
pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
probe_ent->n_ports = 2;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;
}
pci_set_master(pdev);
/* FIXME: check ata_device_add return */
if (legacy_mode) {
if (legacy_mode & (1 << 0))
ata_device_add(probe_ent);
if (legacy_mode & (1 << 1))
ata_device_add(probe_ent2);
kfree(probe_ent2);
} else {
ata_device_add(probe_ent);
assert(probe_ent2 == NULL);
}
kfree(probe_ent);
return 0;
err_out_free_ent:
kfree(probe_ent);
err_out_regions:
if (legacy_mode & (1 << 0))
release_region(0x1f0, 8);
if (legacy_mode & (1 << 1))
release_region(0x170, 8);
pci_release_regions(pdev);
err_out:
pci_disable_device(pdev);
return rc;
}
/**
* ata_pci_remove_one - PCI layer callback for device removal
* @pdev: PCI device that was removed
*
* PCI layer indicates to libata via this hook that
* a hot-unplug or module unload event has occurred.
* Handle this by unregistering all objects associated
* with this PCI device. Free those objects. Then finally
* release PCI resources and disable device.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*/
void ata_pci_remove_one (struct pci_dev *pdev)
{
struct ata_host_set *host_set = pci_get_drvdata(pdev);
struct ata_port *ap;
unsigned int i;
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
/* FIXME: check return val */
scsi_remove_host(ap->host);
}
free_irq(host_set->irq, host_set);
if (host_set->mmio_base)
iounmap(host_set->mmio_base);
for (i = 0; i < host_set->n_ports; i++) {
Scsi_Host_Template *sht;
ap = host_set->ports[i];
sht = ap->host->hostt;
ata_scsi_release(ap->host);
scsi_host_put(ap->host); /* FIXME: check return val */
}
pci_release_regions(pdev);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_ioports *ioaddr;
ap = host_set->ports[i];
ioaddr = &ap->ioaddr;
if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
if (ioaddr->cmd_addr == 0x1f0)
release_region(0x1f0, 8);
else if (ioaddr->cmd_addr == 0x170)
release_region(0x170, 8);
}
}
/* the loop above still walks host_set->ports[], so free the container last */
kfree(host_set);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
{
unsigned long tmp = 0;
switch (bits->width) {
case 1: {
u8 tmp8 = 0;
pci_read_config_byte(pdev, bits->reg, &tmp8);
tmp = tmp8;
break;
}
case 2: {
u16 tmp16 = 0;
pci_read_config_word(pdev, bits->reg, &tmp16);
tmp = tmp16;
break;
}
case 4: {
u32 tmp32 = 0;
pci_read_config_dword(pdev, bits->reg, &tmp32);
tmp = tmp32;
break;
}
default:
return -EINVAL;
}
tmp &= bits->mask;
return (tmp == bits->val) ? 1 : 0;
}
/**
* ata_init - module load-time initialization
*
* LOCKING:
*
* RETURNS:
*
*/
static int __init ata_init(void)
{
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
}
module_init(ata_init);
/*
* libata is essentially a library of internal helper functions for
* low-level ATA host controller drivers. As such, the API/ABI is
* likely to change as new drivers are added and updated.
* Do not depend on ABI/API stability.
*/
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load_pio);
EXPORT_SYMBOL_GPL(ata_tf_load_mmio);
EXPORT_SYMBOL_GPL(ata_tf_read_pio);
EXPORT_SYMBOL_GPL(ata_tf_read_mmio);
EXPORT_SYMBOL_GPL(ata_check_status_pio);
EXPORT_SYMBOL_GPL(ata_check_status_mmio);
EXPORT_SYMBOL_GPL(ata_exec_command_pio);
EXPORT_SYMBOL_GPL(ata_exec_command_mmio);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_fill_sg);
EXPORT_SYMBOL_GPL(ata_bmdma_start_pio);
EXPORT_SYMBOL_GPL(ata_bmdma_start_mmio);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(pata_phy_config);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
/*
libata-scsi.c - helper library for ATA
Copyright 2003 Red Hat, Inc. All rights reserved.
Copyright 2003 Jeff Garzik
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
http://www.opensource.org/licenses/osl-1.1.txt and is included herein
by reference.
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
*/
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#include "libata.h"
struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
struct ata_device *dev,
Scsi_Cmnd *cmd,
void (*done)(Scsi_Cmnd *))
{
struct ata_queued_cmd *qc;
qc = ata_qc_new_init(ap, dev);
if (qc) {
qc->scsicmd = cmd;
qc->scsidone = done;
if (cmd->use_sg) {
qc->sg = (struct scatterlist *) cmd->request_buffer;
qc->n_elem = cmd->use_sg;
} else {
qc->sg = &qc->sgent;
qc->n_elem = 1;
}
} else {
cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
done(cmd);
}
return qc;
}
/**
* ata_to_sense_error - translate a failed ATA command into SCSI sense data
* @qc: command whose failure is being reported
*
* LOCKING:
*/
void ata_to_sense_error(struct ata_queued_cmd *qc)
{
Scsi_Cmnd *cmd = qc->scsicmd;
cmd->result = SAM_STAT_CHECK_CONDITION;
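/* fixed-format sense data: response code 0x70, sense key MEDIUM ERROR */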
cmd->sense_buffer[0] = 0x70;
cmd->sense_buffer[2] = MEDIUM_ERROR;
cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
/* additional-sense-code[-qualifier] */
if ((qc->flags & ATA_QCFLAG_WRITE) == 0) {
cmd->sense_buffer[12] = 0x11; /* "unrecovered read error" */
cmd->sense_buffer[13] = 0x04;
} else {
cmd->sense_buffer[12] = 0x0C; /* "write error - */
cmd->sense_buffer[13] = 0x02; /* auto-reallocation failed" */
}
}
/**
* ata_scsi_slave_config - configure SCSI device parameters for a newly attached device
* @sdev: SCSI device being configured
*
* LOCKING:
*
*/
int ata_scsi_slave_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
return 0; /* scsi layer doesn't check return value, sigh */
}
/**
* ata_scsi_error - SCSI layer error handler callback
* @host: SCSI host on which error occurred
*
* Handles SCSI-layer-thrown error events.
*
* LOCKING:
* Inherited from SCSI layer (none, can sleep)
*
* RETURNS:
* Zero.
*/
int ata_scsi_error(struct Scsi_Host *host)
{
struct ata_port *ap;
DPRINTK("ENTER\n");
ap = (struct ata_port *) &host->hostdata[0];
ap->ops->eng_timeout(ap);
DPRINTK("EXIT\n");
return 0;
}
/**
* ata_scsi_rw_xlat - translate a SCSI READ/WRITE CDB into an ATA taskfile
* @qc: command whose taskfile is to be built
* @scsicmd: SCSI CDB to translate
* @cmd_size: CDB length in bytes (6, 10 or 16)
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
*
*/
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd,
unsigned int cmd_size)
{
struct ata_taskfile *tf = &qc->tf;
unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
unsigned int dma = qc->flags & ATA_QCFLAG_DMA;
qc->cursect = qc->cursg = qc->cursg_ofs = 0;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->hob_nsect = 0;
tf->hob_lbal = 0;
tf->hob_lbam = 0;
tf->hob_lbah = 0;
if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
scsicmd[0] == READ_16) {
if (likely(dma)) {
if (lba48)
tf->command = ATA_CMD_READ_EXT;
else
tf->command = ATA_CMD_READ;
tf->protocol = ATA_PROT_DMA_READ;
} else {
if (lba48)
tf->command = ATA_CMD_PIO_READ_EXT;
else
tf->command = ATA_CMD_PIO_READ;
tf->protocol = ATA_PROT_PIO_READ;
}
qc->flags &= ~ATA_QCFLAG_WRITE;
VPRINTK("reading\n");
} else {
if (likely(dma)) {
if (lba48)
tf->command = ATA_CMD_WRITE_EXT;
else
tf->command = ATA_CMD_WRITE;
tf->protocol = ATA_PROT_DMA_WRITE;
} else {
if (lba48)
tf->command = ATA_CMD_PIO_WRITE_EXT;
else
tf->command = ATA_CMD_PIO_WRITE;
tf->protocol = ATA_PROT_PIO_WRITE;
}
qc->flags |= ATA_QCFLAG_WRITE;
VPRINTK("writing\n");
}
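/* READ(10)/WRITE(10): big-endian LBA in CDB bytes 2-5, transfer length in bytes 7-8 */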
if (cmd_size == 10) {
if (lba48) {
tf->hob_nsect = scsicmd[7];
tf->hob_lbal = scsicmd[2];
qc->nsect = ((unsigned int)scsicmd[7] << 8) |
scsicmd[8];
} else {
/* if we don't support LBA48 addressing, the request
* -may- be too large. */
if ((scsicmd[2] & 0xf0) || scsicmd[7])
return 1;
/* stores LBA27:24 in lower 4 bits of device reg */
tf->device |= scsicmd[2];
qc->nsect = scsicmd[8];
}
tf->device |= ATA_LBA;
tf->nsect = scsicmd[8];
tf->lbal = scsicmd[5];
tf->lbam = scsicmd[4];
tf->lbah = scsicmd[3];
VPRINTK("ten-byte command\n");
return 0;
}
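/* READ(6)/WRITE(6): 21-bit LBA spread across CDB bytes 1-3, transfer length in byte 4 */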
if (cmd_size == 6) {
qc->nsect = tf->nsect = scsicmd[4];
tf->lbal = scsicmd[3];
tf->lbam = scsicmd[2];
tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
VPRINTK("six-byte command\n");
return 0;
}
if (cmd_size == 16) {
/* rule out impossible LBAs and sector counts */
if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
return 1;
if (lba48) {
tf->hob_nsect = scsicmd[12];
tf->hob_lbal = scsicmd[6];
tf->hob_lbam = scsicmd[5];
tf->hob_lbah = scsicmd[4];
qc->nsect = ((unsigned int)scsicmd[12] << 8) |
scsicmd[13];
} else {
/* once again, filter out impossible non-zero values */
if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
(scsicmd[6] & 0xf0))
return 1;
/* stores LBA27:24 in lower 4 bits of device reg */
tf->device |= scsicmd[2];
qc->nsect = scsicmd[13];
}
tf->device |= ATA_LBA;
tf->nsect = scsicmd[13];
tf->lbal = scsicmd[9];
tf->lbam = scsicmd[8];
tf->lbah = scsicmd[7];
VPRINTK("sixteen-byte command\n");
return 0;
}
DPRINTK("no-byte command\n");
return 1;
}
/**
* ata_scsi_rw_queue - build and issue an ATA READ/WRITE from a SCSI command
* @ap: port the target device is attached to
* @dev: target device
* @cmd: SCSI command to translate and issue
* @done: SCSI completion callback
* @cmd_size: CDB length in bytes (6, 10 or 16)
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev,
Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *),
unsigned int cmd_size)
{
struct ata_queued_cmd *qc;
u8 *scsicmd = cmd->cmnd;
VPRINTK("ENTER\n");
if (unlikely(cmd->request_bufflen < 1)) {
printk(KERN_WARNING "ata%u(%u): empty request buffer\n",
ap->id, dev->devno);
goto err_out;
}
qc = ata_scsi_qc_new(ap, dev, cmd, done);
if (!qc)
return;
qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */
if (ata_scsi_rw_xlat(qc, scsicmd, cmd_size))
goto err_out;
/* select device, send command to hardware */
if (ata_qc_issue(qc))
goto err_out;
VPRINTK("EXIT\n");
return;
err_out:
ata_bad_cdb(cmd, done);
DPRINTK("EXIT - badcmd\n");
}
/**
* ata_scsi_rbuf_get - Map response buffer.
* @cmd: SCSI command containing buffer to be mapped.
* @buf_out: Pointer to mapped area.
*
* Maps buffer contained within SCSI command @cmd.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* FIXME: kmap inside spin_lock_irqsave ok?
*
* RETURNS:
* Length of response buffer.
*/
static unsigned int ata_scsi_rbuf_get(Scsi_Cmnd *cmd, u8 **buf_out)
{
u8 *buf;
unsigned int buflen;
if (cmd->use_sg) {
struct scatterlist *sg;
sg = (struct scatterlist *) cmd->request_buffer;
buf = kmap(sg->page) + sg->offset;
buflen = sg->length;
} else {
buf = cmd->request_buffer;
buflen = cmd->request_bufflen;
}
memset(buf, 0, buflen);
*buf_out = buf;
return buflen;
}
/**
* ata_scsi_rbuf_put - Unmap response buffer.
* @cmd: SCSI command containing buffer to be unmapped.
*
* Unmaps response buffer contained within @cmd.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static inline void ata_scsi_rbuf_put(Scsi_Cmnd *cmd)
{
if (cmd->use_sg) {
struct scatterlist *sg;
sg = (struct scatterlist *) cmd->request_buffer;
kunmap(sg->page);
}
}
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
* @args: Port / device / SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
* Mapping the response buffer, calling the command's handler,
* and handling the handler's return value. This return value
* indicates whether the handler wishes the SCSI command to be
* completed successfully, or not.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen))
{
u8 *rbuf;
unsigned int buflen, rc;
Scsi_Cmnd *cmd = args->cmd;
buflen = ata_scsi_rbuf_get(cmd, &rbuf);
rc = actor(args, rbuf, buflen);
ata_scsi_rbuf_put(cmd);
if (rc)
ata_bad_cdb(cmd, args->done);
else {
cmd->result = SAM_STAT_GOOD;
args->done(cmd);
}
}
/**
* ata_scsiop_inq_std - Simulate INQUIRY command
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Returns standard device identification data associated
* with non-EVPD INQUIRY command output.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
const u8 hdr[] = {
TYPE_DISK,
0,
0x5, /* claim SPC-3 version compatibility */
2,
96 - 4
};
VPRINTK("ENTER\n");
memcpy(rbuf, hdr, sizeof(hdr));
if (buflen > 36) {
memcpy(&rbuf[8], args->dev->vendor, 8);
memcpy(&rbuf[16], args->dev->product, 16);
memcpy(&rbuf[32], DRV_VERSION, 4);
}
if (buflen > 63) {
const u8 versions[] = {
0x60, /* SAM-3 (no version claimed) */
0x03,
0x20, /* SBC-2 (no version claimed) */
0x02,
0x60 /* SPC-3 (no version claimed) */
};
memcpy(rbuf + 59, versions, sizeof(versions));
}
return 0;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY EVPD page 0, list of pages
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Returns list of inquiry EVPD pages available.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
0x83 /* page 0x83, device ident page */
};
rbuf[3] = sizeof(pages); /* number of supported EVPD pages */
if (buflen > 6)
memcpy(rbuf + 4, pages, sizeof(pages));
return 0;
}
/**
* ata_scsiop_inq_80 - Simulate INQUIRY EVPD page 80, device serial number
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Returns ATA device serial number.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
const u8 hdr[] = {
0,
0x80, /* this page code */
0,
ATA_SERNO_LEN, /* page len */
};
memcpy(rbuf, hdr, sizeof(hdr));
if (buflen > (ATA_SERNO_LEN + 4))
ata_dev_id_string(args->dev, (unsigned char *) &rbuf[4],
ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
return 0;
}
static const char *inq_83_str = "Linux ATA-SCSI simulator";
/**
* ata_scsiop_inq_83 - Simulate INQUIRY EVPD page 83, device identity
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Returns device identification. Currently hardcoded to
* return "Linux ATA-SCSI simulator".
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
rbuf[1] = 0x83; /* this page code */
rbuf[3] = 4 + strlen(inq_83_str); /* page len */
/* our one and only identification descriptor (vendor-specific) */
if (buflen > (strlen(inq_83_str) + 4 + 4)) {
rbuf[4 + 0] = 2; /* code set: ASCII */
rbuf[4 + 3] = strlen(inq_83_str);
memcpy(rbuf + 4 + 4, inq_83_str, strlen(inq_83_str));
}
return 0;
}
/**
* ata_scsiop_noop - simulate a command that needs no data to succeed
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* No operation. Simply returns success to caller, to indicate
* that the caller should successfully complete this SCSI command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
VPRINTK("ENTER\n");
return 0;
}
/**
* ata_scsiop_sync_cache - Simulate SYNCHRONIZE CACHE command
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Initiates flush of device's cache.
*
* TODO:
* Actually do this :)
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
VPRINTK("ENTER\n");
/* FIXME */
return 1;
}
/**
* ata_msense_push - Push data onto MODE SENSE data output buffer
* @ptr_io: (input/output) Location to store more output data
* @last: End of output data buffer
* @buf: Pointer to BLOB being added to output buffer
* @buflen: Length of BLOB
*
* Store MODE SENSE data on an output buffer.
*
* LOCKING:
* None.
*/
static void ata_msense_push(u8 **ptr_io, const u8 *last,
const u8 *buf, unsigned int buflen)
{
u8 *ptr = *ptr_io;
if ((ptr + buflen - 1) > last)
return;
memcpy(ptr, buf, buflen);
ptr += buflen;
*ptr_io = ptr;
}
/**
* ata_msense_caching - Simulate MODE SENSE caching info page
* @dev: device whose write-cache capability is reported
* @ptr_io: (input/output) location to store more output data
* @last: end of the output data buffer
*
* Generate a caching info page, which conditionally indicates
* write caching to the SCSI layer, depending on device
* capabilities.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_caching(struct ata_device *dev, u8 **ptr_io,
const u8 *last)
{
u8 page[7] = { 0xf, 0, 0x10, 0, 0x8, 0xa, 0 };
if (dev->flags & ATA_DFLAG_WCACHE)
page[6] = 0x4;
ata_msense_push(ptr_io, last, page, sizeof(page));
return sizeof(page);
}
/**
* ata_msense_ctl_mode - Simulate MODE SENSE control mode page
* @ptr_io: (input/output) location to store more output data
* @last: end of the output data buffer
*
* Generate a generic MODE SENSE control mode page.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
{
const u8 page[] = {0xa, 0xa, 2, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 30};
ata_msense_push(ptr_io, last, page, sizeof(page));
return sizeof(page);
}
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Simulate MODE SENSE commands.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
u8 *scsicmd = args->cmd->cmnd, *p, *last;
struct ata_device *dev = args->dev;
unsigned int page_control, six_byte, output_len;
VPRINTK("ENTER\n");
six_byte = (scsicmd[0] == MODE_SENSE);
/* we only support saved and current values (which we treat
* in the same manner)
*/
page_control = scsicmd[2] >> 6;
if ((page_control != 0) && (page_control != 3))
return 1;
if (six_byte)
output_len = 4;
else
output_len = 8;
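/* MODE SENSE(6) uses a 4-byte parameter header, MODE SENSE(10) an 8-byte one */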
p = rbuf + output_len;
last = rbuf + buflen - 1;
switch(scsicmd[2] & 0x3f) {
case 0x08: /* caching */
output_len += ata_msense_caching(dev, &p, last);
break;
case 0x0a: { /* control mode */
output_len += ata_msense_ctl_mode(&p, last);
break;
}
case 0x3f: /* all pages */
output_len += ata_msense_caching(dev, &p, last);
output_len += ata_msense_ctl_mode(&p, last);
break;
default: /* invalid page code */
return 1;
}
if (six_byte) {
output_len--;
rbuf[0] = output_len;
} else {
output_len -= 2;
rbuf[0] = output_len >> 8;
rbuf[1] = output_len;
}
return 0;
}
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY and READ CAPACITY(16) commands
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Simulate READ CAPACITY commands.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
u64 n_sectors = args->dev->n_sectors;
u32 tmp;
VPRINTK("ENTER\n");
n_sectors--; /* one off */
tmp = n_sectors; /* note: truncates, if lba48 */
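/* READ CAPACITY(10) returns a 32-bit last-LBA; READ CAPACITY(16) returns all 64 bits */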
if (args->cmd->cmnd[0] == READ_CAPACITY) {
rbuf[0] = tmp >> (8 * 3);
rbuf[1] = tmp >> (8 * 2);
rbuf[2] = tmp >> (8 * 1);
rbuf[3] = tmp;
tmp = ATA_SECT_SIZE;
rbuf[6] = tmp >> 8;
rbuf[7] = tmp;
} else {
rbuf[2] = n_sectors >> (8 * 7);
rbuf[3] = n_sectors >> (8 * 6);
rbuf[4] = n_sectors >> (8 * 5);
rbuf[5] = n_sectors >> (8 * 4);
rbuf[6] = tmp >> (8 * 3);
rbuf[7] = tmp >> (8 * 2);
rbuf[8] = tmp >> (8 * 1);
rbuf[9] = tmp;
tmp = ATA_SECT_SIZE;
rbuf[12] = tmp >> 8;
rbuf[13] = tmp;
}
return 0;
}
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
* @args: Port / device / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
* @buflen: Response buffer length.
*
* Simulate REPORT LUNS command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
VPRINTK("ENTER\n");
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
return 0;
}
/**
* ata_scsi_badcmd - complete a SCSI command with ILLEGAL REQUEST sense data
* @cmd: SCSI command being rejected
* @done: SCSI completion callback
* @asc: additional sense code to report
* @ascq: additional sense code qualifier to report
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_scsi_badcmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), u8 asc, u8 ascq)
{
DPRINTK("ENTER\n");
cmd->result = SAM_STAT_CHECK_CONDITION;
cmd->sense_buffer[0] = 0x70;
cmd->sense_buffer[2] = ILLEGAL_REQUEST;
cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
cmd->sense_buffer[12] = asc;
cmd->sense_buffer[13] = ascq;
done(cmd);
}
/**
* atapi_scsi_queuecmd - Send CDB to ATAPI device
* @ap: Port to which ATAPI device is attached.
* @dev: Target device for CDB.
* @cmd: SCSI command being sent to device.
* @done: SCSI command completion function.
*
* Sends CDB to ATAPI device. If the Linux SCSI layer sends a
* non-data command, then this function handles the command
* directly, via polling. Otherwise, the bmdma engine is started.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void atapi_scsi_queuecmd(struct ata_port *ap, struct ata_device *dev,
Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
struct ata_queued_cmd *qc;
u8 *scsicmd = cmd->cmnd, status;
unsigned int doing_dma = 0;
VPRINTK("ENTER, drv_stat = 0x%x\n", ata_chk_status(ap));
if (cmd->sc_data_direction == SCSI_DATA_UNKNOWN) {
DPRINTK("unknown data, scsicmd 0x%x\n", scsicmd[0]);
ata_bad_cdb(cmd, done);
return;
}
switch(scsicmd[0]) {
case READ_6:
case WRITE_6:
case MODE_SELECT:
case MODE_SENSE:
DPRINTK("read6/write6/modesel/modesense trap\n");
ata_bad_scsiop(cmd, done);
return;
default:
/* do nothing */
break;
}
qc = ata_scsi_qc_new(ap, dev, cmd, done);
if (!qc) {
printk(KERN_ERR "ata%u: command queue empty\n", ap->id);
return;
}
qc->flags |= ATA_QCFLAG_ATAPI;
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (cmd->sc_data_direction == SCSI_DATA_WRITE) {
qc->flags |= ATA_QCFLAG_WRITE;
DPRINTK("direction: write\n");
}
qc->tf.command = ATA_CMD_PACKET;
/* set up SG table */
if (cmd->sc_data_direction == SCSI_DATA_NONE) {
ap->active_tag = qc->tag;
qc->flags |= ATA_QCFLAG_ACTIVE | ATA_QCFLAG_POLL;
qc->tf.protocol = ATA_PROT_ATAPI;
ata_dev_select(ap, dev->devno, 1, 0);
DPRINTK("direction: none\n");
qc->tf.ctl |= ATA_NIEN; /* disable interrupts */
ata_tf_to_host_nolock(ap, &qc->tf);
} else {
qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */
qc->tf.feature = ATAPI_PKT_DMA;
qc->tf.protocol = ATA_PROT_ATAPI_DMA;
doing_dma = 1;
/* select device, send command to hardware */
if (ata_qc_issue(qc))
goto err_out;
}
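/* give the device a moment to ask for the CDB; if it is still busy,
 * let the port thread finish the handshake */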
status = ata_busy_wait(ap, ATA_BUSY, 1000);
if (status & ATA_BUSY) {
ata_thread_wake(ap, THR_PACKET);
return;
}
if ((status & ATA_DRQ) == 0)
goto err_out;
/* FIXME: mmio-ize */
DPRINTK("writing cdb\n");
outsl(ap->ioaddr.data_addr, scsicmd, ap->host->max_cmd_len / 4);
if (!doing_dma)
ata_thread_wake(ap, THR_PACKET);
VPRINTK("EXIT\n");
return;
err_out:
if (!doing_dma)
ata_irq_on(ap); /* re-enable interrupts */
ata_bad_cdb(cmd, done);
DPRINTK("EXIT - badcmd\n");
}
/**
* ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
* @done: Completion function, called when command is complete
*
* In some cases, this function translates SCSI commands into
* ATA taskfiles, and queues the taskfiles to be sent to
* hardware. In other cases, this function simulates a
* SCSI device by evaluating and responding to certain
* SCSI commands. This creates the overall effect of
* ATA and ATAPI devices appearing as SCSI devices.
*
* LOCKING:
* Releases scsi-layer-held lock, and obtains host_set lock.
*
* RETURNS:
* Zero.
*/
int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
u8 *scsicmd = cmd->cmnd;
struct ata_port *ap;
struct ata_device *dev;
struct ata_scsi_args args;
const unsigned int atapi_support =
#ifdef ATA_ENABLE_ATAPI
1;
#else
0;
#endif
/* Note: spin_lock_irqsave is held by caller... */
spin_unlock(cmd->device->host->host_lock);
ap = (struct ata_port *) &cmd->device->host->hostdata[0];
DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
ap->id,
cmd->device->channel, cmd->device->id, cmd->device->lun,
scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
scsicmd[8]);
/* skip commands not addressed to targets we care about */
if ((cmd->device->channel != 0) || (cmd->device->lun != 0) ||
(cmd->device->id >= ATA_MAX_DEVICES)) {
cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */
done(cmd);
goto out;
}
spin_lock(&ap->host_set->lock);
dev = &ap->device[cmd->device->id];
if (!ata_dev_present(dev)) {
DPRINTK("no device\n");
cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */
done(cmd);
goto out_unlock;
}
if (dev->class == ATA_DEV_ATAPI) {
if (atapi_support)
atapi_scsi_queuecmd(ap, dev, cmd, done);
else {
cmd->result = (DID_BAD_TARGET << 16); /* correct? */
done(cmd);
}
goto out_unlock;
}
/* fast path */
switch(scsicmd[0]) {
case READ_6:
case WRITE_6:
ata_scsi_rw_queue(ap, dev, cmd, done, 6);
goto out_unlock;
case READ_10:
case WRITE_10:
ata_scsi_rw_queue(ap, dev, cmd, done, 10);
goto out_unlock;
case READ_16:
case WRITE_16:
ata_scsi_rw_queue(ap, dev, cmd, done, 16);
goto out_unlock;
default:
/* do nothing */
break;
}
/*
* slow path
*/
args.ap = ap;
args.dev = dev;
args.cmd = cmd;
args.done = done;
switch(scsicmd[0]) {
case TEST_UNIT_READY: /* FIXME: correct? */
case FORMAT_UNIT: /* FIXME: correct? */
case SEND_DIAGNOSTIC: /* FIXME: correct? */
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
break;
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
ata_bad_cdb(cmd, done);
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
else if (scsicmd[2] == 0x00)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
else if (scsicmd[2] == 0x80)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
else if (scsicmd[2] == 0x83)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
else
ata_bad_cdb(cmd, done);
break;
case MODE_SENSE:
case MODE_SENSE_10:
ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
break;
case MODE_SELECT: /* unconditionally return */
case MODE_SELECT_10: /* bad-field-in-cdb */
ata_bad_cdb(cmd, done);
break;
case SYNCHRONIZE_CACHE:
if ((dev->flags & ATA_DFLAG_WCACHE) == 0)
ata_bad_scsiop(cmd, done);
else
ata_scsi_rbuf_fill(&args, ata_scsiop_sync_cache);
break;
case READ_CAPACITY:
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
break;
case SERVICE_ACTION_IN:
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
ata_bad_cdb(cmd, done);
break;
case REPORT_LUNS:
ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
break;
	/* mandatory commands we haven't implemented yet */
case REQUEST_SENSE:
/* all other commands */
default:
ata_bad_scsiop(cmd, done);
break;
}
out_unlock:
spin_unlock(&ap->host_set->lock);
out:
spin_lock(cmd->device->host->host_lock);
return 0;
}
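
/*
 * Illustrative sketch only -- not part of this driver.  The kernel-doc for
 * ata_scsi_queuecmd() above says that READ/WRITE CDBs are translated into
 * ATA taskfiles; the hypothetical helper below shows roughly what that
 * translation looks like for a READ(10) CDB mapped onto an LBA28 DMA read
 * (assuming 1 <= transfer length <= 255; error checking omitted for brevity).
 */
static void example_read10_to_tf(const u8 *scsicmd, struct ata_taskfile *tf)
{
	u32 lba = ((u32) scsicmd[2] << 24) | ((u32) scsicmd[3] << 16) |
		  ((u32) scsicmd[4] << 8)  | (u32) scsicmd[5];
	u16 len = ((u16) scsicmd[7] << 8) | (u16) scsicmd[8];

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_DMA_READ;
	tf->command = ATA_CMD_READ;		/* READ DMA, LBA28 */
	tf->nsect = len & 0xff;			/* sector count */
	tf->lbal = lba & 0xff;
	tf->lbam = (lba >> 8) & 0xff;
	tf->lbah = (lba >> 16) & 0xff;
	tf->device = ATA_DEVICE_OBS | ATA_LBA | ((lba >> 24) & 0xf);
}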
/*
libata.h - helper library for ATA
Copyright 2003 Red Hat, Inc. All rights reserved.
Copyright 2003 Jeff Garzik
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
http://www.opensource.org/licenses/osl-1.1.txt and is included herein
by reference.
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
*/
#ifndef __LIBATA_H__
#define __LIBATA_H__
#define DRV_NAME "libata"
#define DRV_VERSION "0.75" /* must be exactly four chars */
struct ata_scsi_args {
struct ata_port *ap;
struct ata_device *dev;
Scsi_Cmnd *cmd;
void (*done)(Scsi_Cmnd *);
};
/* libata-core.c */
extern unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s,
unsigned int ofs, unsigned int len);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
struct ata_device *dev);
extern int ata_qc_issue(struct ata_queued_cmd *qc);
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_thread_wake(struct ata_port *ap, unsigned int thr_state);
/* libata-scsi.c */
extern void ata_to_sense_error(struct ata_queued_cmd *qc);
extern void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev,
Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *),
unsigned int cmd_size);
extern int ata_scsi_error(struct Scsi_Host *host);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
extern void ata_scsi_badcmd(Scsi_Cmnd *cmd,
void (*done)(Scsi_Cmnd *),
u8 asc, u8 ascq);
extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
/* complete @cmd, reporting ASC 0x20 (INVALID COMMAND OPERATION CODE) */
static inline void ata_bad_scsiop(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
	ata_scsi_badcmd(cmd, done, 0x20, 0x00);
}

/* complete @cmd, reporting ASC 0x24 (INVALID FIELD IN CDB) */
static inline void ata_bad_cdb(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
	ata_scsi_badcmd(cmd, done, 0x24, 0x00);
}
#endif /* __LIBATA_H__ */
/*
* sata_promise.c - Promise SATA
*
* Copyright 2003 Red Hat, Inc.
*
* The contents of this file are subject to the Open
* Software License version 1.1 that can be found at
* http://www.opensource.org/licenses/osl-1.1.txt and is included herein
* by reference.
*
* Alternatively, the contents of this file may be used under the terms
* of the GNU General Public License version 2 (the "GPL") as distributed
* in the kernel source COPYING file, in which case the provisions of
* the GPL are applicable instead of the above. If you wish to allow
* the use of your version of this file only under the terms of the
* GPL and not to allow others to use your version of this file under
* the OSL, indicate your decision by deleting the provisions above and
* replace them with the notice and other provisions required by the GPL.
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#define DRV_NAME "sata_promise"
#define DRV_VERSION "0.83"
enum {
PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
PDC_TBG_MODE = 0x41, /* TBG mode */
PDC_FLASH_CTL = 0x44, /* Flash control register */
PDC_CTLSTAT = 0x60, /* IDE control and status register */
PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
PDC_SLEW_CTL = 0x470, /* slew rate control reg */
PDC_20621_SEQCTL = 0x400,
PDC_20621_SEQMASK = 0x480,
PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
board_2037x = 0, /* FastTrak S150 TX2plus */
board_20319 = 1, /* FastTrak S150 TX4 */
board_20621 = 2, /* FastTrak S150 SX4 */
PDC_FLAG_20621 = (1 << 30), /* we have a 20621 */
};
static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void pdc_sata_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio);
static void pdc_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma);
static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_dma_start(struct ata_queued_cmd *qc);
static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static void pdc_eng_timeout(struct ata_port *ap);
static void pdc_20621_phy_reset (struct ata_port *ap);
static Scsi_Host_Template pdc_sata_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = ata_scsi_queuecmd,
.eh_strategy_handler = ata_scsi_error,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = ATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
};
static struct ata_port_operations pdc_sata_ops = {
.port_disable = ata_port_disable,
.set_piomode = pdc_sata_set_piomode,
.set_udmamode = pdc_sata_set_udmamode,
.tf_load = ata_tf_load_mmio,
.tf_read = ata_tf_read_mmio,
.check_status = ata_check_status_mmio,
.exec_command = ata_exec_command_mmio,
.phy_reset = sata_phy_reset,
.phy_config = pata_phy_config, /* not a typo */
.bmdma_start = pdc_dma_start,
.fill_sg = ata_fill_sg,
.eng_timeout = pdc_eng_timeout,
.irq_handler = pdc_interrupt,
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
};
static struct ata_port_operations pdc_20621_ops = {
.port_disable = ata_port_disable,
.set_piomode = pdc_sata_set_piomode,
.set_udmamode = pdc_sata_set_udmamode,
.tf_load = ata_tf_load_mmio,
.tf_read = ata_tf_read_mmio,
.check_status = ata_check_status_mmio,
.exec_command = ata_exec_command_mmio,
.phy_reset = pdc_20621_phy_reset,
.phy_config = pata_phy_config, /* not a typo */
.bmdma_start = pdc_dma_start,
.fill_sg = ata_fill_sg,
.eng_timeout = pdc_eng_timeout,
.irq_handler = pdc_interrupt,
};
static struct ata_port_info pdc_port_info[] = {
/* board_2037x */
{
.sht = &pdc_sata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &pdc_sata_ops,
},
/* board_20319 */
{
.sht = &pdc_sata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &pdc_sata_ops,
},
/* board_20621 */
{
.sht = &pdc_sata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO |
PDC_FLAG_20621,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &pdc_20621_ops,
},
};
static struct pci_device_id pdc_sata_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_2037x },
{ PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_2037x },
{ PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
#if 0 /* broken currently */
{ PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20621 },
#endif
{ } /* terminate list */
};
static struct pci_driver pdc_sata_pci_driver = {
.name = DRV_NAME,
.id_table = pdc_sata_pci_tbl,
.probe = pdc_sata_init_one,
.remove = ata_pci_remove_one,
};
static void pdc_20621_phy_reset (struct ata_port *ap)
{
VPRINTK("ENTER\n");
ap->cbl = ATA_CBL_SATA;
ata_port_probe(ap);
ata_bus_reset(ap);
}
static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
u32 val)
{
if (sc_reg > SCR_CONTROL)
return;
writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void pdc_sata_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio)
{
/* dummy */
}
static void pdc_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma)
{
/* dummy */
}
enum pdc_packet_bits {
PDC_PKT_READ = (1 << 2),
PDC_PKT_NODATA = (1 << 3),
PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
PDC_PKT_CLEAR_BSY = (1 << 4),
PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
PDC_LAST_REG = (1 << 3),
PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
};
static inline void pdc_pkt_header(struct ata_taskfile *tf, dma_addr_t sg_table,
unsigned int devno, u8 *buf)
{
u8 dev_reg;
u32 *buf32 = (u32 *) buf;
/* set control bits (byte 0), zero delay seq id (byte 3),
* and seq id (byte 2)
*/
switch (tf->protocol) {
case ATA_PROT_DMA_READ:
buf32[0] = cpu_to_le32(PDC_PKT_READ);
break;
case ATA_PROT_DMA_WRITE:
buf32[0] = 0;
break;
case ATA_PROT_NODATA:
buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
break;
default:
BUG();
break;
}
buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
buf32[2] = 0; /* no next-packet */
if (devno == 0)
dev_reg = ATA_DEVICE_OBS;
else
dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
/* select device */
buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
buf[13] = dev_reg;
/* device control register */
buf[14] = (1 << 5) | PDC_REG_DEVCTL;
buf[15] = tf->ctl;
}
static inline void pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
unsigned int i)
{
if (tf->flags & ATA_TFLAG_DEVICE) {
buf[i++] = (1 << 5) | ATA_REG_DEVICE;
buf[i++] = tf->device;
}
/* and finally the command itself; also includes end-of-pkt marker */
buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
buf[i++] = tf->command;
}
static void pdc_prep_lba28(struct ata_taskfile *tf, dma_addr_t sg_table,
unsigned int devno, u8 *buf)
{
unsigned int i;
pdc_pkt_header(tf, sg_table, devno, buf);
/* the "(1 << 5)" should be read "(count << 5)" */
i = 16;
/* ATA command block registers */
buf[i++] = (1 << 5) | ATA_REG_FEATURE;
buf[i++] = tf->feature;
buf[i++] = (1 << 5) | ATA_REG_NSECT;
buf[i++] = tf->nsect;
buf[i++] = (1 << 5) | ATA_REG_LBAL;
buf[i++] = tf->lbal;
buf[i++] = (1 << 5) | ATA_REG_LBAM;
buf[i++] = tf->lbam;
buf[i++] = (1 << 5) | ATA_REG_LBAH;
buf[i++] = tf->lbah;
pdc_pkt_footer(tf, buf, i);
}
static void pdc_prep_lba48(struct ata_taskfile *tf, dma_addr_t sg_table,
unsigned int devno, u8 *buf)
{
unsigned int i;
pdc_pkt_header(tf, sg_table, devno, buf);
/* the "(2 << 5)" should be read "(count << 5)" */
i = 16;
/* ATA command block registers */
buf[i++] = (2 << 5) | ATA_REG_FEATURE;
buf[i++] = tf->hob_feature;
buf[i++] = tf->feature;
buf[i++] = (2 << 5) | ATA_REG_NSECT;
buf[i++] = tf->hob_nsect;
buf[i++] = tf->nsect;
buf[i++] = (2 << 5) | ATA_REG_LBAL;
buf[i++] = tf->hob_lbal;
buf[i++] = tf->lbal;
buf[i++] = (2 << 5) | ATA_REG_LBAM;
buf[i++] = tf->hob_lbam;
buf[i++] = tf->lbam;
buf[i++] = (2 << 5) | ATA_REG_LBAH;
buf[i++] = tf->hob_lbah;
buf[i++] = tf->lbah;
pdc_pkt_footer(tf, buf, i);
}
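
/*
 * Illustrative sketch only -- not part of this driver.  It shows how the
 * packet header and footer above compose for a non-data taskfile
 * (ATA_PROT_NODATA): no command-block registers other than device/command
 * are needed, so the footer follows the 16-byte header directly.  The
 * helper name is hypothetical, and sg_table is simply whatever DMA address
 * the caller has set up (presumably unused for a non-data command).
 */
static void example_pdc_prep_nodata(struct ata_taskfile *tf, dma_addr_t sg_table,
				    unsigned int devno, u8 *buf)
{
	pdc_pkt_header(tf, sg_table, devno, buf);
	pdc_pkt_footer(tf, buf, 16);
}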
static inline void __pdc_dma_complete (struct ata_port *ap,
struct ata_queued_cmd *qc)
{
void *dmactl = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
u32 val;
/* clear DMA start/stop bit (bit 7) */
val = readl(dmactl);
writel(val & ~(1 << 7), dmactl);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_altstatus(ap); /* dummy read */
}
static inline void pdc_dma_complete (struct ata_port *ap,
struct ata_queued_cmd *qc)
{
__pdc_dma_complete(ap, qc);
/* get drive status; clear intr; complete txn */
ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
ata_wait_idle(ap), 0);
}
static void pdc_eng_timeout(struct ata_port *ap)
{
u8 drv_stat;
struct ata_queued_cmd *qc;
DPRINTK("ENTER\n");
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
ap->id);
goto out;
}
switch (qc->tf.protocol) {
case ATA_PROT_DMA_READ:
case ATA_PROT_DMA_WRITE:
printk(KERN_ERR "ata%u: DMA timeout\n", ap->id);
__pdc_dma_complete(ap, qc);
ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
ata_wait_idle(ap) | ATA_ERR, 0);
break;
case ATA_PROT_NODATA:
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n",
ap->id, qc->tf.command, drv_stat);
ata_qc_complete(qc, drv_stat, 1);
break;
default:
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
ap->id, qc->tf.command, drv_stat);
ata_qc_complete(qc, drv_stat, 1);
break;
}
out:
DPRINTK("EXIT\n");
}
static inline unsigned int pdc_host_intr( struct ata_port *ap,
struct ata_queued_cmd *qc)
{
u8 status;
unsigned int handled = 0;
switch (qc->tf.protocol) {
case ATA_PROT_DMA_READ:
case ATA_PROT_DMA_WRITE:
pdc_dma_complete(ap, qc);
handled = 1;
break;
case ATA_PROT_NODATA: /* command completion, but no data xfer */
status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
ata_qc_complete(qc, status, 0);
handled = 1;
break;
default:
ap->stats.idle_irq++;
break;
}
return handled;
}
static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp;
unsigned int handled = 0, have_20621 = 0;
void *mmio_base;
VPRINTK("ENTER\n");
if (!host_set || !host_set->mmio_base) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}
mmio_base = host_set->mmio_base;
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
if (ap && (ap->flags & PDC_FLAG_20621)) {
have_20621 = 1;
break;
}
}
/* reading should also clear interrupts */
if (have_20621) {
mmio_base += PDC_CHIP0_OFS;
mask = readl(mmio_base + PDC_20621_SEQMASK);
} else {
mask = readl(mmio_base + PDC_INT_SEQMASK);
}
if (mask == 0xffffffff) {
VPRINTK("QUICK EXIT 2\n");
return IRQ_NONE;
}
mask &= 0xf; /* only 16 tags possible */
if (!mask) {
VPRINTK("QUICK EXIT 3\n");
return IRQ_NONE;
}
spin_lock_irq(&host_set->lock);
for (i = 0; i < host_set->n_ports; i++) {
VPRINTK("port %u\n", i);
ap = host_set->ports[i];
tmp = mask & (1 << (i + 1));
if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0))
handled += pdc_host_intr(ap, qc);
}
}
spin_unlock_irq(&host_set->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
static void pdc_dma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
unsigned int port_no = ap->port_no;
void *mmio = host_set->mmio_base;
void *dmactl = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE);
u32 val;
u8 seq = (u8) (port_no + 1);
wmb(); /* flush writes made to PRD table in DMA memory */
if (ap->flags & PDC_FLAG_20621)
mmio += PDC_CHIP0_OFS;
VPRINTK("ENTER, ap %p, mmio %p\n", ap, mmio);
/* indicate where our S/G table is to chip */
writel(ap->prd_dma, (void *) ap->ioaddr.cmd_addr + PDC_PRD_TBL);
/* clear dma start bit (paranoia), clear intr seq id (paranoia),
* set DMA direction (bit 6 == from chip -> drive)
*/
val = readl(dmactl);
VPRINTK("val == %x\n", val);
val &= ~(1 << 7); /* clear dma start/stop bit */
if (rw) /* set/clear dma direction bit */
val |= (1 << 6);
else
val &= ~(1 << 6);
if (qc->tf.ctl & ATA_NIEN) /* set/clear irq-mask bit */
val |= (1 << 10);
else
val &= ~(1 << 10);
writel(val, dmactl);
val = readl(dmactl);
VPRINTK("val == %x\n", val);
/* FIXME: clear any intr status bits here? */
ata_exec_command_mmio(ap, &qc->tf);
VPRINTK("FIVE\n");
if (ap->flags & PDC_FLAG_20621)
writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
else
writel(0x00000001, mmio + (seq * 4));
/* start host DMA transaction */
writel(val | seq | (1 << 7), dmactl);
}
static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
port->cmd_addr = base;
port->data_addr = base;
port->error_addr = base + 0x4;
port->nsect_addr = base + 0x8;
port->lbal_addr = base + 0xc;
port->lbam_addr = base + 0x10;
port->lbah_addr = base + 0x14;
port->device_addr = base + 0x18;
port->cmdstat_addr = base + 0x1c;
port->ctl_addr = base + 0x38;
}
static void pdc_20621_init(struct ata_probe_ent *pe)
{
}
static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
{
void *mmio = pe->mmio_base;
u32 tmp;
if (chip_id == board_20621)
return;
	/* change FIFO_SHD to 8 dwords; the vendor's Promise driver does this,
	 * reason unknown.
	 */
tmp = readl(mmio + PDC_FLASH_CTL);
if ((tmp & (1 << 16)) == 0)
writel(tmp | (1 << 16), mmio + PDC_FLASH_CTL);
/* clear plug/unplug flags for all ports */
tmp = readl(mmio + PDC_SATA_PLUG_CSR);
writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR);
/* mask plug/unplug ints */
tmp = readl(mmio + PDC_SATA_PLUG_CSR);
writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR);
	/* reduce TBG clock to 133 MHz. FIXME: why? */
tmp = readl(mmio + PDC_TBG_MODE);
	tmp &= ~0x30000; /* clear bits 17:16 */
	tmp |= 0x10000; /* set bits 17:16 = 01b */
writel(tmp, mmio + PDC_TBG_MODE);
/* adjust slew rate control register. FIXME: why? */
tmp = readl(mmio + PDC_SLEW_CTL);
	tmp &= 0xFFFFF03F; /* clear bits 11-6 */
	tmp |= 0x00000900; /* set bits 11-9 = 100b, bits 8-6 = 100b */
writel(tmp, mmio + PDC_SLEW_CTL);
}
static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_probe_ent *probe_ent = NULL;
unsigned long base;
void *mmio_base;
unsigned int board_idx = (unsigned int) ent->driver_data;
int rc;
if (!printed_version++)
printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
goto err_out_regions;
probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
if (probe_ent == NULL) {
rc = -ENOMEM;
goto err_out_regions;
}
memset(probe_ent, 0, sizeof(*probe_ent));
probe_ent->pdev = pdev;
INIT_LIST_HEAD(&probe_ent->node);
mmio_base = ioremap(pci_resource_start(pdev, 3),
pci_resource_len(pdev, 3));
if (mmio_base == NULL) {
rc = -ENOMEM;
goto err_out_free_ent;
}
base = (unsigned long) mmio_base;
probe_ent->sht = pdc_port_info[board_idx].sht;
probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;
probe_ent->mmio_base = mmio_base;
if (board_idx == board_20621)
base += PDC_CHIP0_OFS;
pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
probe_ent->port[0].scr_addr = base + 0x400;
pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
probe_ent->port[1].scr_addr = base + 0x500;
/* notice 4-port boards */
switch (board_idx) {
case board_20319:
case board_20621:
probe_ent->n_ports = 4;
pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
probe_ent->port[2].scr_addr = base + 0x600;
pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
probe_ent->port[3].scr_addr = base + 0x700;
break;
case board_2037x:
probe_ent->n_ports = 2;
break;
default:
BUG();
break;
}
pci_set_master(pdev);
/* initialize adapter */
switch (board_idx) {
case board_20621:
pdc_20621_init(probe_ent);
break;
default:
pdc_host_init(board_idx, probe_ent);
break;
}
/* FIXME: check ata_device_add return value */
ata_device_add(probe_ent);
kfree(probe_ent);
return 0;
err_out_free_ent:
kfree(probe_ent);
err_out_regions:
pci_release_regions(pdev);
err_out:
pci_disable_device(pdev);
return rc;
}
static int __init pdc_sata_init(void)
{
int rc;
rc = pci_module_init(&pdc_sata_pci_driver);
if (rc)
return rc;
return 0;
}
static void __exit pdc_sata_exit(void)
{
pci_unregister_driver(&pdc_sata_pci_driver);
}
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
module_init(pdc_sata_init);
module_exit(pdc_sata_exit);
/*
* ata_sil.c - Silicon Image SATA
*
* Copyright 2003 Red Hat, Inc.
* Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
*
* The contents of this file are subject to the Open
* Software License version 1.1 that can be found at
* http://www.opensource.org/licenses/osl-1.1.txt and is included herein
* by reference.
*
* Alternatively, the contents of this file may be used under the terms
* of the GNU General Public License version 2 (the "GPL") as distributed
* in the kernel source COPYING file, in which case the provisions of
* the GPL are applicable instead of the above. If you wish to allow
* the use of your version of this file only under the terms of the
* GPL and not to allow others to use your version of this file under
* the OSL, indicate your decision by deleting the provisions above and
* replace them with the notice and other provisions required by the GPL.
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#define DRV_NAME "ata_sil"
#define DRV_VERSION "0.51"
enum {
sil_3112 = 0,
SIL_IDE0_TF = 0x80,
SIL_IDE0_CTL = 0x8A,
SIL_IDE0_BMDMA = 0x00,
SIL_IDE0_SCR = 0x100,
SIL_IDE1_TF = 0xC0,
SIL_IDE1_CTL = 0xCA,
SIL_IDE1_BMDMA = 0x08,
SIL_IDE1_SCR = 0x180,
};
static void sil_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio);
static void sil_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma);
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static struct pci_device_id sil_pci_tbl[] = {
{ 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
{ } /* terminate list */
};
static struct pci_driver sil_pci_driver = {
.name = DRV_NAME,
.id_table = sil_pci_tbl,
.probe = sil_init_one,
.remove = ata_pci_remove_one,
};
static Scsi_Host_Template sil_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = ata_scsi_queuecmd,
.eh_strategy_handler = ata_scsi_error,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = ATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
};
static struct ata_port_operations sil_ops = {
.port_disable = ata_port_disable,
.dev_config = sil_dev_config,
.set_piomode = sil_set_piomode,
.set_udmamode = sil_set_udmamode,
.tf_load = ata_tf_load_mmio,
.tf_read = ata_tf_read_mmio,
.check_status = ata_check_status_mmio,
.exec_command = ata_exec_command_mmio,
.phy_reset = sata_phy_reset,
.phy_config = pata_phy_config, /* not a typo */
.bmdma_start = ata_bmdma_start_mmio,
.fill_sg = ata_fill_sg,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.scr_read = sil_scr_read,
.scr_write = sil_scr_write,
};
static struct ata_port_info sil_port_info[] = {
/* sil_3112 */
{
.sht = &sil_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = 0x7f, /* udma0-6; FIXME */
.port_ops = &sil_ops,
},
};
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
{
unsigned long offset = ap->ioaddr.scr_addr;
switch (sc_reg) {
case SCR_STATUS:
return offset + 4;
case SCR_ERROR:
return offset + 8;
case SCR_CONTROL:
return offset;
default:
/* do nothing */
break;
}
return 0;
}
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
void *mmio = (void *) sil_scr_addr(ap, sc_reg);
if (mmio)
return readl(mmio);
return 0xffffffffU;
}
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
void *mmio = (void *) sil_scr_addr(ap, sc_reg);
if (mmio)
writel(val, mmio);
}
/**
* sil_dev_config - Apply device/host-specific errata fixups
* @ap: Port containing device to be examined
* @dev: Device to be examined
*
* After the IDENTIFY [PACKET] DEVICE step is complete, and a
* device is known to be present, this function is called.
* We apply two errata fixups which are specific to Silicon Image,
* a Seagate and a Maxtor fixup.
*
* For certain Seagate devices, we must limit the maximum sectors
* to under 8K.
*
* For certain Maxtor devices, we must not program the drive
* beyond udma5.
*
* Both fixups are unfairly pessimistic. As soon as I get more
* information on these errata, I will create a more exhaustive
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*/
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
{
const char *s = &dev->product[0];
unsigned int len = strnlen(s, sizeof(dev->product));
/* ATAPI specifies that empty space is blank-filled; remove blanks */
while ((len > 0) && (s[len - 1] == ' '))
len--;
/* limit to udma5 */
if (!memcmp(s, "Maxtor ", 7)) {
printk(KERN_INFO "ata%u(%u): applying pessimistic Maxtor errata fix\n",
ap->id, dev->devno);
ap->udma_mask &= ATA_UDMA5;
return;
}
/* limit requests to 15 sectors */
if ((len > 4) && (!memcmp(s, "ST", 2))) {
if ((!memcmp(s + len - 2, "AS", 2)) ||
(!memcmp(s + len - 3, "ASL", 3))) {
printk(KERN_INFO "ata%u(%u): applying pessimistic Seagate errata fix\n",
ap->id, dev->devno);
ap->host->max_sectors = 15;
ap->host->hostt->max_sectors = 15;
return;
}
}
}
static void sil_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio)
{
/* We need empty implementation, the core doesn't test for NULL
* function pointer
*/
}
static void sil_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma)
{
/* We need empty implementation, the core doesn't test for NULL
* function pointer
*/
}
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_probe_ent *probe_ent = NULL;
unsigned long base;
void *mmio_base;
int rc;
if (!printed_version++)
printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
goto err_out_regions;
probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
if (probe_ent == NULL) {
rc = -ENOMEM;
goto err_out_regions;
}
memset(probe_ent, 0, sizeof(*probe_ent));
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->pdev = pdev;
probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
probe_ent->sht = sil_port_info[ent->driver_data].sht;
probe_ent->n_ports = 2;
probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;
probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
mmio_base = ioremap(pci_resource_start(pdev, 5),
pci_resource_len(pdev, 5));
if (mmio_base == NULL) {
rc = -ENOMEM;
goto err_out_free_ent;
}
probe_ent->mmio_base = mmio_base;
base = (unsigned long) mmio_base;
probe_ent->port[0].cmd_addr = base + SIL_IDE0_TF;
probe_ent->port[0].ctl_addr = base + SIL_IDE0_CTL;
probe_ent->port[0].bmdma_addr = base + SIL_IDE0_BMDMA;
probe_ent->port[0].scr_addr = base + SIL_IDE0_SCR;
ata_std_ports(&probe_ent->port[0]);
probe_ent->port[1].cmd_addr = base + SIL_IDE1_TF;
probe_ent->port[1].ctl_addr = base + SIL_IDE1_CTL;
probe_ent->port[1].bmdma_addr = base + SIL_IDE1_BMDMA;
probe_ent->port[1].scr_addr = base + SIL_IDE1_SCR;
ata_std_ports(&probe_ent->port[1]);
pci_set_master(pdev);
/* FIXME: check ata_device_add return value */
ata_device_add(probe_ent);
kfree(probe_ent);
return 0;
err_out_free_ent:
kfree(probe_ent);
err_out_regions:
pci_release_regions(pdev);
err_out:
pci_disable_device(pdev);
return rc;
}
static int __init sil_init(void)
{
int rc;
rc = pci_module_init(&sil_pci_driver);
if (rc)
return rc;
return 0;
}
static void __exit sil_exit(void)
{
pci_unregister_driver(&sil_pci_driver);
}
module_init(sil_init);
module_exit(sil_exit);
/*
* ata_k2.c - Broadcom (Apple K2) SATA
*
* Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
*
* Bits from Jeff Garzik, Copyright RedHat, Inc.
*
* This driver probably works with non-Apple versions of the
* Broadcom chipset...
*
* The contents of this file are subject to the Open
* Software License version 1.1 that can be found at
* http://www.opensource.org/licenses/osl-1.1.txt and is included herein
* by reference.
*
* Alternatively, the contents of this file may be used under the terms
* of the GNU General Public License version 2 (the "GPL") as distributed
* in the kernel source COPYING file, in which case the provisions of
* the GPL are applicable instead of the above. If you wish to allow
* the use of your version of this file only under the terms of the
* GPL and not to allow others to use your version of this file under
* the OSL, indicate your decision by deleting the provisions above and
* replace them with the notice and other provisions required by the GPL.
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#ifdef CONFIG_ALL_PPC
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif /* CONFIG_ALL_PPC */
#define DRV_NAME "ata_k2"
#define DRV_VERSION "1.02"
static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
u32 val)
{
if (sc_reg > SCR_CONTROL)
return;
writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
writeb(tf->ctl, ioaddr->ctl_addr);
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->error_addr);
writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
} else if (is_addr) {
writew(tf->feature, ioaddr->error_addr);
writew(tf->nsect, ioaddr->nsect_addr);
writew(tf->lbal, ioaddr->lbal_addr);
writew(tf->lbam, ioaddr->lbam_addr);
writew(tf->lbah, ioaddr->lbah_addr);
}
if (tf->flags & ATA_TFLAG_DEVICE)
writeb(tf->device, ioaddr->device_addr);
ata_wait_idle(ap);
}
static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u16 nsect, lbal, lbam, lbah;
nsect = tf->nsect = readw(ioaddr->nsect_addr);
lbal = tf->lbal = readw(ioaddr->lbal_addr);
lbam = tf->lbam = readw(ioaddr->lbam_addr);
lbah = tf->lbah = readw(ioaddr->lbah_addr);
tf->device = readw(ioaddr->device_addr);
if (tf->flags & ATA_TFLAG_LBA48) {
tf->hob_feature = readw(ioaddr->error_addr) >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
tf->hob_lbah = lbah >> 8;
}
}
static u8 k2_stat_check_status(struct ata_port *ap)
{
return readl((void *) ap->ioaddr.cmdstat_addr);
}
static void k2_sata_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio)
{
/* We need empty implementation, the core doesn't test for NULL
* function pointer
*/
}
static void k2_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma)
{
/* We need empty implementation, the core doesn't test for NULL
* function pointer
*/
}
#ifdef CONFIG_ALL_PPC
/*
* k2_sata_proc_info
* inout : decides on the direction of the dataflow and the meaning of the
* variables
* buffer: If inout==FALSE data is being written to it else read from it
* *start: If inout==FALSE start of the valid data in the buffer
* offset: If inout==FALSE offset from the beginning of the imaginary file
* from which we start writing into the buffer
* length: If inout==FALSE max number of bytes to be written into the buffer
* else number of bytes in the buffer
*/
static int k2_sata_proc_info(char *page, char **start, off_t offset, int count,
int hostno, int inout)
{
struct Scsi_Host *hpnt;
struct ata_port *ap;
struct device_node *np;
int len, index;
	/* Find our Scsi_Host.  This lookup is not properly locked, but given
	 * the /proc/scsi interface and the lack of state kept around in this
	 * driver, it is the best we can do for now.
	 */
hpnt = scsi_hostlist;
while (hpnt) {
if (hostno == hpnt->host_no)
break;
hpnt = hpnt->next;
}
if (!hpnt)
return 0;
/* Find the ata_port */
ap = (struct ata_port *) &hpnt->hostdata[0];
if (ap == NULL)
return 0;
/* Find the OF node for the PCI device proper */
np = pci_device_to_OF_node(ap->host_set->pdev);
if (np == NULL)
return 0;
/* Match it to a port node */
index = (ap == ap->host_set->ports[0]) ? 0 : 1;
for (np = np->child; np != NULL; np = np->sibling) {
u32 *reg = (u32 *)get_property(np, "reg", NULL);
if (!reg)
continue;
if (index == *reg)
break;
}
if (np == NULL)
return 0;
len = sprintf(page, "devspec: %s\n", np->full_name);
return len;
}
#endif /* CONFIG_ALL_PPC */
static Scsi_Host_Template k2_sata_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = ata_scsi_queuecmd,
.eh_strategy_handler = ata_scsi_error,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = ATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
#ifdef CONFIG_ALL_PPC
.proc_info = k2_sata_proc_info
#endif
};
static struct ata_port_operations k2_sata_ops = {
.port_disable = ata_port_disable,
.set_piomode = k2_sata_set_piomode,
.set_udmamode = k2_sata_set_udmamode,
.tf_load = k2_sata_tf_load,
.tf_read = k2_sata_tf_read,
.check_status = k2_stat_check_status,
.exec_command = ata_exec_command_mmio,
.phy_reset = sata_phy_reset,
.phy_config = pata_phy_config, /* not a typo */
.bmdma_start = ata_bmdma_start_mmio,
.fill_sg = ata_fill_sg,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.scr_read = k2_sata_scr_read,
.scr_write = k2_sata_scr_write,
};
static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
port->cmd_addr = base;
port->data_addr = base;
port->error_addr = base + 0x4;
port->nsect_addr = base + 0x8;
port->lbal_addr = base + 0xc;
port->lbam_addr = base + 0x10;
port->lbah_addr = base + 0x14;
port->device_addr = base + 0x18;
port->cmdstat_addr = base + 0x1c;
port->ctl_addr = base + 0x20;
port->bmdma_addr = base + 0x30;
port->scr_addr = base + 0x40;
}
static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_probe_ent *probe_ent = NULL;
unsigned long base;
void *mmio_base;
int rc;
if (!printed_version++)
printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
/*
* If this driver happens to only be useful on Apple's K2, then
* we should check that here as it has a normal Serverworks ID
*/
rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
goto err_out_regions;
probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
if (probe_ent == NULL) {
rc = -ENOMEM;
goto err_out_regions;
}
memset(probe_ent, 0, sizeof(*probe_ent));
probe_ent->pdev = pdev;
INIT_LIST_HEAD(&probe_ent->node);
mmio_base = ioremap(pci_resource_start(pdev, 5),
pci_resource_len(pdev, 5));
if (mmio_base == NULL) {
rc = -ENOMEM;
goto err_out_free_ent;
}
base = (unsigned long) mmio_base;
/*
* Check for the "disabled" second function to avoid registering
* useless interfaces on K2
*/
if (readl(mmio_base + 0x40) == 0xffffffffUL &&
readl(mmio_base + 0x140) == 0xffffffffUL) {
rc = -ENODEV;
goto err_out_unmap;
}
probe_ent->sht = &k2_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO;
probe_ent->port_ops = &k2_sata_ops;
probe_ent->n_ports = 2;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;
probe_ent->mmio_base = mmio_base;
/*
* We don't care much about the PIO/UDMA masks, but the core won't like us
* if we don't fill these
*/
probe_ent->pio_mask = 0x1f;
probe_ent->udma_mask = 0x7f;
k2_sata_setup_port(&probe_ent->port[0], base);
k2_sata_setup_port(&probe_ent->port[1], base + 0x100);
pci_set_master(pdev);
/* FIXME: check ata_device_add return value */
ata_device_add(probe_ent);
kfree(probe_ent);
return 0;
err_out_unmap:
iounmap((void *)base);
err_out_free_ent:
kfree(probe_ent);
err_out_regions:
pci_release_regions(pdev);
err_out:
pci_disable_device(pdev);
return rc;
}
static struct pci_device_id k2_sata_pci_tbl[] = {
{ 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ }
};
static struct pci_driver k2_sata_pci_driver = {
.name = DRV_NAME,
.id_table = k2_sata_pci_tbl,
.probe = k2_sata_init_one,
.remove = ata_pci_remove_one,
};
static int __init k2_sata_init(void)
{
int rc;
rc = pci_module_init(&k2_sata_pci_driver);
if (rc)
return rc;
return 0;
}
static void __exit k2_sata_exit(void)
{
pci_unregister_driver(&k2_sata_pci_driver);
}
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
module_init(k2_sata_init);
module_exit(k2_sata_exit);
/*
sata_via.c - VIA Serial ATA controllers
Copyright 2003 Red Hat, Inc. All rights reserved.
Copyright 2003 Jeff Garzik
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
http://www.opensource.org/licenses/osl-1.1.txt and is included herein
by reference.
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "0.11"
enum {
via_sata = 0,
};
static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void svia_sata_phy_reset(struct ata_port *ap);
static void svia_port_disable(struct ata_port *ap);
static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio);
static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma);
static unsigned int in_module_init = 1;
static struct pci_device_id svia_pci_tbl[] = {
{ 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, via_sata },
{ } /* terminate list */
};
static struct pci_driver svia_pci_driver = {
.name = DRV_NAME,
.id_table = svia_pci_tbl,
.probe = svia_init_one,
.remove = ata_pci_remove_one,
};
static Scsi_Host_Template svia_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = ata_scsi_queuecmd,
.eh_strategy_handler = ata_scsi_error,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = ATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
};
static struct ata_port_operations svia_sata_ops = {
.port_disable = svia_port_disable,
.set_piomode = svia_set_piomode,
.set_udmamode = svia_set_udmamode,
.tf_load = ata_tf_load_pio,
.tf_read = ata_tf_read_pio,
.check_status = ata_check_status_pio,
.exec_command = ata_exec_command_pio,
.phy_reset = svia_sata_phy_reset,
.phy_config = pata_phy_config, /* not a typo */
.bmdma_start = ata_bmdma_start_pio,
.fill_sg = ata_fill_sg,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
};
static struct ata_port_info svia_port_info[] = {
/* via_sata */
{
.sht = &svia_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY
| ATA_FLAG_SRST,
.pio_mask = 0x03, /* pio3-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &svia_sata_ops,
},
};
static struct pci_bits svia_enable_bits[] = {
{ 0x40U, 1U, 0x02UL, 0x02UL }, /* port 0 */
{ 0x40U, 1U, 0x01UL, 0x01UL }, /* port 1 */
};
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
/**
 *	svia_sata_phy_reset - Verify port enable bits, then reset SATA phy
 *	@ap: Port to reset and probe
*
* LOCKING:
*
*/
static void svia_sata_phy_reset(struct ata_port *ap)
{
if (!pci_test_config_bits(ap->host_set->pdev,
&svia_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return;
}
ata_port_probe(ap);
if (ap->flags & ATA_FLAG_PORT_DISABLED)
return;
ata_bus_reset(ap);
}
/**
 *	svia_port_disable - Disable an ATA port
 *	@ap: Port to disable
*
* LOCKING:
*
*/
static void svia_port_disable(struct ata_port *ap)
{
ata_port_disable(ap);
/* FIXME */
}
/**
 *	svia_set_piomode - Set PIO transfer mode (currently a no-op)
 *	@ap: Port the device is attached to
 *	@adev: Device to configure
 *	@pio: PIO mode to set
*
* LOCKING:
*
*/
static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev,
unsigned int pio)
{
/* FIXME: needed? */
}
/**
 *	svia_set_udmamode - Set UDMA transfer mode (currently a no-op)
 *	@ap: Port the device is attached to
 *	@adev: Device to configure
 *	@udma: UDMA mode to set
*
* LOCKING:
*
*/
static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev,
unsigned int udma)
{
/* FIXME: needed? */
}
/**
 *	svia_init_one - Probe and register one VIA SATA controller
 *	@pdev: PCI device to be probed
 *	@ent: Matching entry in svia_pci_tbl
 *
 *	LOCKING:
 *
 *	RETURNS:
 *	Zero on success, negative errno on failure.
*/
static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_port_info *port_info[1];
unsigned int n_ports = 1;
if (!printed_version++)
printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
/* no hotplugging support (FIXME) */
if (!in_module_init)
return -ENODEV;
port_info[0] = &svia_port_info[ent->driver_data];
return ata_pci_init_one(pdev, port_info, n_ports);
}
/**
 *	svia_init - Module init; register the PCI driver
 *
 *	LOCKING:
 *
 *	RETURNS:
 *	Zero on success, negative errno on failure.
*/
static int __init svia_init(void)
{
int rc;
DPRINTK("pci_module_init\n");
rc = pci_module_init(&svia_pci_driver);
if (rc)
return rc;
in_module_init = 0;
DPRINTK("done\n");
return 0;
}
/**
 *	svia_exit - Module exit; unregister the PCI driver
*
* LOCKING:
*
*/
static void __exit svia_exit(void)
{
pci_unregister_driver(&svia_pci_driver);
}
module_init(svia_init);
module_exit(svia_exit);
/*
Copyright 2003 Red Hat, Inc. All rights reserved.
Copyright 2003 Jeff Garzik
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
http://www.opensource.org/licenses/osl-1.1.txt and is included herein
by reference.
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
*/
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
#define ATA_DMA_MASK 0xffffffffULL
enum {
/* various global constants */
ATA_MAX_DEVICES = 2, /* per bus/port */
ATA_MAX_PRD = 256, /* we could make these 256/256 */
ATA_SECT_SIZE = 512,
ATA_SECT_SIZE_MASK = (ATA_SECT_SIZE - 1),
ATA_SECT_DWORDS = ATA_SECT_SIZE / sizeof(u32),
ATA_ID_WORDS = 256,
ATA_ID_PROD_OFS = 27,
ATA_ID_SERNO_OFS = 10,
ATA_ID_MAJOR_VER = 80,
ATA_ID_PIO_MODES = 64,
ATA_ID_UDMA_MODES = 88,
ATA_ID_PIO4 = (1 << 1),
ATA_PCI_CTL_OFS = 2,
ATA_SERNO_LEN = 20,
ATA_UDMA0 = (1 << 0),
ATA_UDMA1 = ATA_UDMA0 | (1 << 1),
ATA_UDMA2 = ATA_UDMA1 | (1 << 2),
ATA_UDMA3 = ATA_UDMA2 | (1 << 3),
ATA_UDMA4 = ATA_UDMA3 | (1 << 4),
ATA_UDMA5 = ATA_UDMA4 | (1 << 5),
ATA_UDMA6 = ATA_UDMA5 | (1 << 6),
ATA_UDMA7 = ATA_UDMA6 | (1 << 7),
/* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */
ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */
/* DMA-related */
ATA_PRD_SZ = 8,
ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ),
ATA_PRD_EOT = (1 << 31), /* end-of-table flag */
ATA_DMA_TABLE_OFS = 4,
ATA_DMA_STATUS = 2,
ATA_DMA_CMD = 0,
ATA_DMA_WR = (1 << 3),
ATA_DMA_START = (1 << 0),
ATA_DMA_INTR = (1 << 2),
ATA_DMA_ERR = (1 << 1),
ATA_DMA_ACTIVE = (1 << 0),
/* bits in ATA command block registers */
ATA_HOB = (1 << 7), /* LBA48 selector */
ATA_NIEN = (1 << 1), /* disable-irq flag */
ATA_LBA = (1 << 6), /* LBA28 selector */
ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */
ATA_BUSY = (1 << 7), /* BSY status bit */
ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */
ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */
ATA_DRQ = (1 << 3), /* data request i/o */
ATA_ERR = (1 << 0), /* have an error */
ATA_SRST = (1 << 2), /* software reset */
ATA_ABORTED = (1 << 2), /* command aborted */
/* ATA command block registers */
ATA_REG_DATA = 0x00,
ATA_REG_ERR = 0x01,
ATA_REG_NSECT = 0x02,
ATA_REG_LBAL = 0x03,
ATA_REG_LBAM = 0x04,
ATA_REG_LBAH = 0x05,
ATA_REG_DEVICE = 0x06,
ATA_REG_STATUS = 0x07,
ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */
ATA_REG_CMD = ATA_REG_STATUS,
ATA_REG_BYTEL = ATA_REG_LBAM,
ATA_REG_BYTEH = ATA_REG_LBAH,
ATA_REG_DEVSEL = ATA_REG_DEVICE,
ATA_REG_IRQ = ATA_REG_NSECT,
/* ATA taskfile protocols */
ATA_PROT_UNKNOWN = 0,
ATA_PROT_NODATA = 1,
ATA_PROT_PIO_READ = 2,
ATA_PROT_PIO_WRITE = 3,
ATA_PROT_DMA_READ = 4,
ATA_PROT_DMA_WRITE = 5,
ATA_PROT_ATAPI = 6,
ATA_PROT_ATAPI_DMA = 7,
/* ATA device commands */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
ATA_CMD_ID_ATA = 0xEC,
ATA_CMD_ID_ATAPI = 0xA1,
ATA_CMD_READ = 0xC8,
ATA_CMD_READ_EXT = 0x25,
ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35,
ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30,
ATA_CMD_PIO_WRITE_EXT = 0x34,
ATA_CMD_SET_FEATURES = 0xEF,
ATA_CMD_PACKET = 0xA0,
/* SETFEATURES stuff */
SETFEATURES_XFER = 0x03,
XFER_UDMA_7 = 0x47,
XFER_UDMA_6 = 0x46,
XFER_UDMA_5 = 0x45,
XFER_UDMA_4 = 0x44,
XFER_UDMA_3 = 0x43,
XFER_UDMA_2 = 0x42,
XFER_UDMA_1 = 0x41,
XFER_UDMA_0 = 0x40,
XFER_PIO_4 = 0x0C,
XFER_PIO_3 = 0x0B,
/* ATAPI stuff */
ATAPI_PKT_DMA = (1 << 0),
/* cable types */
ATA_CBL_NONE = 0,
ATA_CBL_PATA40 = 1,
ATA_CBL_PATA80 = 2,
ATA_CBL_PATA_UNK = 3,
ATA_CBL_SATA = 4,
/* SATA Status and Control Registers */
SCR_STATUS = 0,
SCR_ERROR = 1,
SCR_CONTROL = 2,
SCR_ACTIVE = 3,
SCR_NOTIFICATION = 4,
};
/* core structures */
struct ata_prd {
u32 addr;
u32 flags_len;
} __attribute__((packed));
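
/*
 * Illustrative sketch only -- not part of this patch.  Each PRD entry holds
 * the bus address and byte count of one DMA segment; the final entry of the
 * table must have ATA_PRD_EOT set in flags_len.  A hypothetical fill helper
 * is shown below; conversion to the little-endian on-bus format (cpu_to_le32
 * in a real fill routine) is omitted here for brevity.
 */
static inline void example_prd_set(struct ata_prd *prd, u32 bus_addr,
				   u32 byte_count, int is_last)
{
	prd->addr = bus_addr;
	prd->flags_len = byte_count & 0xffff;
	if (is_last)
		prd->flags_len |= ATA_PRD_EOT;
}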
#define ata_id_is_ata(dev) (((dev)->id[0] & (1 << 15)) == 0)
#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10))
#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8))
#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9))
#define ata_id_u32(dev,n) \
(((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)]))
#define ata_id_u64(dev,n) \
( ((u64) dev->id[(n) + 3] << 48) | \
((u64) dev->id[(n) + 2] << 32) | \
((u64) dev->id[(n) + 1] << 16) | \
((u64) dev->id[(n) + 0]) )
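
/*
 * Illustrative usage only -- not part of this patch.  The accessors above
 * combine 16-bit IDENTIFY DEVICE words into wider values; for example, a
 * driver could read the device capacity roughly like this (words 100-103
 * hold the LBA48 maximum sector count, words 60-61 the LBA28 count):
 *
 *	u64 n_sectors;
 *	if (ata_id_has_lba48(dev))
 *		n_sectors = ata_id_u64(dev, 100);
 *	else
 *		n_sectors = ata_id_u32(dev, 60);
 */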
#endif /* __LINUX_ATA_H__ */
...@@ -89,6 +89,7 @@ extern struct resource iomem_resource;
extern int get_resource_list(struct resource *, char *buf, int size);
extern int request_resource(struct resource *root, struct resource *new);
extern struct resource * ____request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
......
/*
Copyright 2003 Red Hat, Inc. All rights reserved.
Copyright 2003 Jeff Garzik
The contents of this file are subject to the Open
Software License version 1.1 that can be found at
http://www.opensource.org/licenses/osl-1.1.txt and is included herein
by reference.
Alternatively, the contents of this file may be used under the terms
of the GNU General Public License version 2 (the "GPL") as distributed
in the kernel source COPYING file, in which case the provisions of
the GPL are applicable instead of the above. If you wish to allow
the use of your version of this file only under the terms of the
GPL and not to allow others to use your version of this file under
the OSL, indicate your decision by deleting the provisions above and
replace them with the notice and other provisions required by the GPL.
If you do not delete the provisions above, a recipient may use your
version of this file under either the OSL or the GPL.
*/
#ifndef __LINUX_LIBATA_H__
#define __LINUX_LIBATA_H__
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <linux/ata.h>
/*
* compile-time options
*/
#undef ATA_FORCE_PIO /* do not configure or use DMA */
#undef ATA_DEBUG /* debugging output */
#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
#undef ATA_NDEBUG /* define to disable quick runtime checks */
#undef ATA_ENABLE_ATAPI /* define to enable ATAPI support */
#undef ATA_ENABLE_PATA /* define to enable PATA support in some
* low-level drivers */
/* note: prints function name for you */
#ifdef ATA_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#ifdef ATA_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif /* ATA_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif /* ATA_DEBUG */
#ifdef ATA_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
if(unlikely(!(expr))) { \
printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
#expr,__FILE__,__FUNCTION__,__LINE__); \
}
#endif
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
#define ATA_DMA_BOUNDARY 0xffffUL
#define ATA_DMA_MASK 0xffffffffULL
enum {
/* various global constants */
ATA_MAX_PORTS = 8,
ATA_DEF_QUEUE = 1,
ATA_MAX_QUEUE = 1,
ATA_MAX_SECTORS = 200, /* FIXME */
ATA_MAX_BUS = 2,
ATA_DEF_BUSY_WAIT = 10000,
ATA_SHORT_PAUSE = (HZ >> 6) + 1,
ATA_SHT_EMULATED = 1,
ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_device stuff */
ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
ATA_DFLAG_MASTER = (1 << 2), /* is device 0? */
ATA_DFLAG_WCACHE = (1 << 3), /* has write cache we can
* (hopefully) flush? */
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
ATA_DEV_ATAPI = 3, /* ATAPI device */
ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
ATA_DEV_NONE = 5, /* no device */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
ATA_FLAG_SATA = (1 << 3),
ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */
ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0),
ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
ATA_QCFLAG_WRITE = (1 << 0), /* read==0, write==1 */
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to SCSI layer */
ATA_QCFLAG_DMA = (1 << 2), /* data delivered via DMA */
ATA_QCFLAG_ATAPI = (1 << 3), /* is ATAPI packet command? */
ATA_QCFLAG_SG = (1 << 4), /* have s/g table? */
ATA_QCFLAG_POLL = (1 << 5), /* polling, no interrupts */
/* struct ata_engine atomic flags (use test_bit, etc.) */
ATA_EFLG_ACTIVE = 0, /* engine is active */
/* various lengths of time */
ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
ATA_TMOUT_PIO = 30 * HZ,
ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
ATA_TMOUT_CDB = 30 * HZ,
ATA_TMOUT_CDB_QUICK = 5 * HZ,
/* ATA bus states */
BUS_UNKNOWN = 0,
BUS_DMA = 1,
BUS_IDLE = 2,
BUS_NOINTR = 3,
BUS_NODATA = 4,
BUS_TIMER = 5,
BUS_PIO = 6,
BUS_EDD = 7,
BUS_IDENTIFY = 8,
BUS_PACKET = 9,
/* thread states */
THR_UNKNOWN = 0,
THR_PORT_RESET = (THR_UNKNOWN + 1),
THR_AWAIT_DEATH = (THR_PORT_RESET + 1),
THR_PROBE_FAILED = (THR_AWAIT_DEATH + 1),
THR_IDLE = (THR_PROBE_FAILED + 1),
THR_PROBE_SUCCESS = (THR_IDLE + 1),
THR_PROBE_START = (THR_PROBE_SUCCESS + 1),
THR_PIO_POLL = (THR_PROBE_START + 1),
THR_PIO_TMOUT = (THR_PIO_POLL + 1),
THR_PIO = (THR_PIO_TMOUT + 1),
THR_PIO_LAST = (THR_PIO + 1),
THR_PIO_LAST_POLL = (THR_PIO_LAST + 1),
THR_PIO_ERR = (THR_PIO_LAST_POLL + 1),
THR_PACKET = (THR_PIO_ERR + 1),
/* SATA port states */
PORT_UNKNOWN = 0,
PORT_ENABLED = 1,
PORT_DISABLED = 2,
/* ata_qc_cb_t flags - these reuse the ATA_QCFLAG_xxx naming above, but
 * occupy a separate bit space from the queued-command flags
 */
ATA_QCFLAG_TIMEOUT = (1 << 0),
};
/* forward declarations */
struct ata_port_operations;
struct ata_port;
struct ata_queued_cmd;
/* typedefs */
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc, unsigned int flags);
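/* Register addresses (port I/O or MMIO, per ATA_FLAG_MMIO) for one port:
 * the ATA command block registers, the control/altstatus register, the
 * bus-master DMA block and the SATA SCR block.
 */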
struct ata_ioports {
unsigned long cmd_addr;
unsigned long data_addr;
unsigned long error_addr;
unsigned long nsect_addr;
unsigned long lbal_addr;
unsigned long lbam_addr;
unsigned long lbah_addr;
unsigned long device_addr;
unsigned long cmdstat_addr;
unsigned long ctl_addr;
unsigned long bmdma_addr;
unsigned long scr_addr;
};
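/* Probe-time description of a controller and its ports; filled in during
 * driver probe and handed to ata_device_add() below.
 */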
struct ata_probe_ent {
struct list_head node;
struct pci_dev *pdev;
struct ata_port_operations *port_ops;
Scsi_Host_Template *sht;
struct ata_ioports port[ATA_MAX_PORTS];
unsigned int n_ports;
unsigned int pio_mask;
unsigned int udma_mask;
unsigned int legacy_mode;
unsigned long irq;
unsigned int irq_flags;
unsigned long host_flags;
void *mmio_base;
};
struct ata_host_set {
spinlock_t lock;
struct pci_dev *pdev;
unsigned long irq;
void *mmio_base;
unsigned int n_ports;
struct ata_port * ports[0];
};
struct ata_taskfile {
unsigned long flags; /* ATA_TFLAG_xxx */
u8 protocol; /* ATA_PROT_xxx */
u8 ctl; /* control reg */
u8 hob_feature; /* additional data */
u8 hob_nsect; /* to support LBA48 */
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
u8 feature;
u8 nsect;
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 command; /* IO operation */
};
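/* One queued command: the originating SCSI command and its completion
 * callback, the translated ATA taskfile, and the scatter/gather and PIO
 * sector bookkeeping used while data moves.
 */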
struct ata_queued_cmd {
struct ata_port *ap;
struct ata_device *dev;
Scsi_Cmnd *scsicmd;
void (*scsidone)(Scsi_Cmnd *);
struct list_head node;
unsigned long flags; /* ATA_QCFLAG_xxx */
unsigned int tag;
unsigned int n_elem;
unsigned int nsect;
unsigned int cursect;
unsigned int cursg;
unsigned int cursg_ofs;
struct ata_taskfile tf;
struct scatterlist sgent;
struct scatterlist *sg;
ata_qc_cb_t callback;
struct semaphore sem;
};
struct ata_host_stats {
unsigned long unhandled_irq;
unsigned long idle_irq;
unsigned long rw_reqbuf;
};
struct ata_device {
u64 n_sectors; /* size of device, if ATA */
unsigned long flags; /* ATA_DFLAG_xxx */
unsigned int class; /* ATA_DEV_xxx */
unsigned int devno; /* 0 or 1 */
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
unsigned int pio_mode;
unsigned int udma_mode;
unsigned char vendor[8]; /* space-padded, not ASCIIZ */
unsigned char product[32]; /* WARNING: shorter than
* ATAPI7 spec size, 40 ASCII
* characters
*/
};
struct ata_engine {
unsigned long flags;
struct list_head q;
};
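/* All per-port state: attached devices, the queued-command table, the PRD
 * (scatter/gather) table used for bus-master DMA, the cached control
 * register, and the port's polling-thread bookkeeping.
 */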
struct ata_port {
struct Scsi_Host *host; /* our co-allocated scsi host */
struct ata_port_operations *ops;
unsigned long flags; /* ATA_FLAG_xxx */
unsigned int id; /* unique id required by SCSI midlayer */
unsigned int port_no; /* unique port #; from zero */
struct ata_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
u8 ctl; /* cache of ATA control register */
unsigned int bus_state;
unsigned int port_state;
unsigned int pio_mask;
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
struct ata_engine eng;
struct ata_device device[ATA_MAX_DEVICES];
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
unsigned long qactive;
unsigned int active_tag;
struct ata_host_stats stats;
struct ata_host_set *host_set;
struct semaphore sem;
struct semaphore probe_sem;
unsigned int thr_state;
int time_to_die;
pid_t thr_pid;
struct completion thr_exited;
struct semaphore thr_sem;
struct timer_list thr_timer;
unsigned long thr_timeout;
};
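/* Hardware access hooks supplied by each low-level driver: taskfile
 * load/read, command issue, status checks, PHY reset/configuration, BMDMA
 * start, scatter/gather setup, timeout handling, the interrupt handler and
 * SCR access.
 */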
struct ata_port_operations {
void (*port_disable) (struct ata_port *);
void (*dev_config) (struct ata_port *, struct ata_device *);
void (*set_piomode) (struct ata_port *, struct ata_device *,
unsigned int);
void (*set_udmamode) (struct ata_port *, struct ata_device *,
unsigned int);
void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
u8 (*check_status)(struct ata_port *ap);
void (*phy_reset) (struct ata_port *ap);
void (*phy_config) (struct ata_port *ap);
void (*bmdma_start) (struct ata_queued_cmd *qc);
void (*fill_sg) (struct ata_queued_cmd *qc);
void (*eng_timeout) (struct ata_port *ap);
irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
u32 val);
};
struct ata_port_info {
Scsi_Host_Template *sht;
unsigned long host_flags;
unsigned long pio_mask;
unsigned long udma_mask;
struct ata_port_operations *port_ops;
};
struct pci_bits {
unsigned int reg; /* PCI config register to read */
unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
unsigned long mask;
unsigned long val;
};
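/* Used with pci_test_config_bits() below.  Judging from the field names,
 * the intent is: read 'width' bytes of PCI config space at offset 'reg',
 * mask the result with 'mask' and compare it against 'val'.  Hypothetical,
 * illustrative example (register offset and bits are made up):
 *
 *	static struct pci_bits port0_enable = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (pci_test_config_bits(pdev, &port0_enable))
 *		port_is_enabled();
 */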
extern void ata_port_probe(struct ata_port *);
extern void pata_phy_config(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern int ata_device_add(struct ata_probe_ent *ent);
extern int ata_scsi_detect(Scsi_Host_Template *sht);
extern int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
extern int ata_scsi_error(struct Scsi_Host *host);
extern int ata_scsi_release(struct Scsi_Host *host);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
/*
* Default driver ops implementations
*/
extern void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf);
extern u8 ata_check_status_pio(struct ata_port *ap);
extern u8 ata_check_status_mmio(struct ata_port *ap);
extern void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_fill_sg(struct ata_queued_cmd *qc);
extern void ata_bmdma_start_mmio (struct ata_queued_cmd *qc);
extern void ata_bmdma_start_pio (struct ata_queued_cmd *qc);
extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late);
extern void ata_eng_timeout(struct ata_port *ap);
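/* Convert milliseconds to jiffies, rounding up so that any non-zero delay
 * maps to at least one tick.
 */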
static inline unsigned long msecs_to_jiffies(unsigned long msecs)
{
return ((HZ * msecs + 999) / 1000);
}
static inline unsigned int ata_tag_valid(unsigned int tag)
{
return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}
static inline unsigned int ata_dev_present(struct ata_device *dev)
{
return ((dev->class == ATA_DEV_ATA) ||
(dev->class == ATA_DEV_ATAPI));
}
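/* Read the ATA error register directly, via MMIO or port I/O depending on
 * how the port is flagged.
 */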
static inline u8 ata_chk_err(struct ata_port *ap)
{
if (ap->flags & ATA_FLAG_MMIO) {
return readb((void *) ap->ioaddr.error_addr);
}
return inb(ap->ioaddr.error_addr);
}
static inline u8 ata_chk_status(struct ata_port *ap)
{
return ap->ops->check_status(ap);
}
static inline u8 ata_altstatus(struct ata_port *ap)
{
if (ap->flags & ATA_FLAG_MMIO)
return readb(ap->ioaddr.ctl_addr);
return inb(ap->ioaddr.ctl_addr);
}
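/* Reading the alternate status register and then pausing 400ns gives the
 * device the settle time ATA expects after certain register accesses.
 */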
static inline void ata_pause(struct ata_port *ap)
{
ata_altstatus(ap);
ndelay(400);
}
static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
unsigned int max)
{
u8 status;
do {
udelay(10);
status = ata_chk_status(ap);
max--;
} while ((status & bits) && (max > 0));
return status;
}
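/* Poll until both BSY and DRQ clear (or the retry count runs out), warning
 * if the port is still reporting an abnormal status afterwards.
 */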
static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
if (status & (ATA_BUSY | ATA_DRQ)) {
unsigned long l = ap->ioaddr.cmdstat_addr;
printk(KERN_WARNING
"ATA: abnormal status 0x%X on port 0x%lX\n",
status, l);
}
return status;
}
static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap,
unsigned int tag)
{
if (likely(ata_tag_valid(tag)))
return &ap->qcmd[tag];
return NULL;
}
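/* Zero a taskfile, seed the control byte from the port's cached value, and
 * select device 0 or 1 via the ATA_DEV1 bit of the device register.
 */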
static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device)
{
memset(tf, 0, sizeof(*tf));
tf->ctl = ap->ctl;
if (device == 0)
tf->device = ATA_DEVICE_OBS;
else
tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}
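/* Enable device interrupts by clearing nIEN in the cached control register,
 * write it back out, then wait for the port to go idle.
 */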
static inline u8 ata_irq_on(struct ata_port *ap)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
ap->ctl &= ~ATA_NIEN;
if (ap->flags & ATA_FLAG_MMIO)
writeb(ap->ctl, ioaddr->ctl_addr);
else
outb(ap->ctl, ioaddr->ctl_addr);
return ata_wait_idle(ap);
}
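/* Wait for BSY (and DRQ, when chk_drq is set) to clear, then acknowledge
 * the interrupt by writing the interrupt and error bits back to the
 * bus-master DMA status register.
 */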
static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
u8 host_stat, post_stat, status;
status = ata_busy_wait(ap, bits, 1000);
if (status & bits)
DPRINTK("abnormal status 0x%X\n", status);
/* get controller status; clear intr, err bits */
if (ap->flags & ATA_FLAG_MMIO) {
void *mmio = (void *) ap->ioaddr.bmdma_addr;
host_stat = readb(mmio + ATA_DMA_STATUS);
writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
mmio + ATA_DMA_STATUS);
post_stat = readb(mmio + ATA_DMA_STATUS);
} else {
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
host_stat, post_stat, status);
return status;
}
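/* SATA status/control register (SCR) access, routed through the low-level
 * driver's scr_read/scr_write hooks.
 */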
static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
{
return ap->ops->scr_read(ap, reg);
}
static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
{
ap->ops->scr_write(ap, reg, val);
}
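/* A DET value of 0x3 in the low bits of SCR_STATUS indicates a device is
 * present and PHY communication is established.
 */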
static inline unsigned int sata_dev_present(struct ata_port *ap)
{
return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
}
#endif /* __LINUX_LIBATA_H__ */
...@@ -206,6 +206,18 @@ int request_resource(struct resource *root, struct resource *new)
EXPORT_SYMBOL(request_resource);
struct resource *____request_resource(struct resource *root, struct resource *new)
{
struct resource *conflict;
write_lock(&resource_lock);
conflict = __request_resource(root, new);
write_unlock(&resource_lock);
return conflict;
}
EXPORT_SYMBOL(____request_resource);
int release_resource(struct resource *old)
{
int retval;
...